
Steps to Deploy an ELK Log Analysis System on CentOS 7.6

This post records the steps I followed to deploy an ELK log analysis system on CentOS 7.6, in the hope that it helps others.

Download Elasticsearch

Create the elk user and grant it ownership of the directories
useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch1
chown -R elk:elk /home/elk/elasticsearch2
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk
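
Before starting any node, raise the kernel's mmap count limit; Elasticsearch refuses to start in production mode if vm.max_map_count is below 262144 (one of its bootstrap checks). A minimal sketch:
sysctl -w vm.max_map_count=262144
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf   # persist across reboots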

Master node

Extract Elasticsearch and edit the configuration file:
/home/elk/elasticsearch/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]

Manual start command:
su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
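
Once it is up, the node can be verified over its HTTP port (address and port as configured above); the response is a small JSON document with the cluster name and version:
curl http://192.168.1.70:9200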

Systemd unit file elasticsearch.service:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#

Node 1
/home/elk/elasticsearch1/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#

Manual start command (note the binary inside the elasticsearch1 directory is still named elasticsearch):
su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d'

Systemd unit file elasticsearch1.service:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch1.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch1
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch1
Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch1
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#

Node 2
/home/elk/elasticsearch2/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#

Manual start command (again, the binary inside elasticsearch2 is named elasticsearch):
su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'

Systemd unit file elasticsearch2.service:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target

[root@localhost system]#
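
With all three nodes started, a quick sanity check confirms they joined the same cluster; _cat/nodes should list node0, node1 and node2, and cluster health should be green or yellow:
curl http://192.168.1.70:9200/_cat/nodes?v
curl http://192.168.1.70:9200/_cluster/health?pretty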

Download Logstash

The directory is as follows; the default configuration can be used as-is:
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#

Manual start commands:
./logstash -f ../dev.conf
nohup ./logstash -f ../dev.conf &
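
Before pointing Logstash at Kafka, a quick smoke test with an inline pipeline is worth running; type a line on stdin and it should come back as a structured event:
./logstash -e 'input { stdin { } } output { stdout { } }'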

Download Kibana

The configuration file is as follows:
[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v "^#" kibana.yml
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"

Manual start commands:
./kibana
nohup ./kibana &

Kibana systemd unit file:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service
[Unit]
Description=Kibana Server Manager
[Service]
ExecStart=/home/elk/kibana/bin/kibana
[Install]
WantedBy=multi-user.target
[root@localhost system]#
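
After creating the unit file, reload systemd and enable the service so Kibana starts on boot:
systemctl daemon-reload
systemctl enable kibana
systemctl start kibana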

Kibana listens on port 5601; access it at 192.168.1.70:5601.

Install elasticsearch-head
yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#

Start:
npm install
npm run start
nohup npm run start &

curl -XPUT '192.168.2.67:9100/book'

Then visit 192.168.2.67:9100 in a browser.

Download Kafka

Edit the configuration file as follows:
[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
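
log.dirs above points at /var/log/kafka-logs. Kafka normally creates this directory on first start when it has permission, but creating it up front avoids surprises if the broker is ever run as a non-root user such as elk:
mkdir -p /var/log/kafka-logs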

Configure and start the ZooKeeper bundled with Kafka

Manual start:
[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]#
./zookeeper-server-start.sh ../config/zookeeper.properties

Start ZooKeeper via systemd:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service
[Unit]
Description=Apache ZooKeeper
After=network.target
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#

Start the Kafka service

Manual start:
./kafka-server-start.sh ../config/server.properties

Start Kafka via systemd:
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh  /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[root@localhost system]#
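
As with the other services, reload systemd and enable both units so ZooKeeper and Kafka come up on boot (ZooKeeper must be running before Kafka starts):
systemctl daemon-reload
systemctl enable zookeeper kafka
systemctl start zookeeper
systemctl start kafka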

Test Kafka

Create a topic named test:
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test

List the topics in Kafka:
./kafka-topics.sh --list --zookeeper 192.168.1.70:2181

Produce messages to the test topic:
./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test

Consume messages from the test topic:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning

If the messages you produce show up on the consumer side, Kafka is working.
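
To inspect the partition and replica assignment of the topic, kafka-topics.sh can also describe it:
./kafka-topics.sh --describe --zookeeper 192.168.1.70:2181 --topic test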

Install Filebeat on the target machine

Install version 6.5:
[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
    tag: 54_tcp_catalina_out
- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
    tag: 54_web_catalina_out
name: 192.168.1.54
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 3
output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1

[root@localhost filebeat]#
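
Filebeat can then be started in the foreground (with -e, logging to stderr, handy for checking the config) or in the background for normal use:
./filebeat -e -c /usr/local/filebeat/filebeat.yml
nohup ./filebeat -c /usr/local/filebeat/filebeat.yml &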

After installation, go back to Logstash and edit its configuration file.

Logstash configuration
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf
input {
  kafka {
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}
filter {
  if [fields][tag] == "jpwebmap" {
    json {
      source => "message"
      remove_field => "message"
    }
    geoip {
      source => "client"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # keep the last path element (the file name) as src
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/home/elk/logstash/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
}
output {
  if [fields][tag] == "wori" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "zabbix"
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_tcp_catalina_out"
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_web_catalina_out"
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_tcp_catalina_out"
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_web_catalina_out"
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    stdout {}
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "51_nginx80_access_log"
    }
  }
}
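
After editing a pipeline this large, it is worth letting Logstash validate the syntax without actually starting it:
./logstash -f ../dev.conf --config.test_and_exit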

Other configuration files

index.conf
filter {
  mutate {
    add_field => { "spstr" => "%{[log][file][path]}" }
  }
  mutate {
    split => ["spstr", "/"]
    # keep the last path element (the file name) as src
    add_field => ["src", "%{[spstr][-1]}"]
  }
  mutate {
    remove_field => [ "friends", "ecs", "agent", "spstr" ]
  }
}

java.conf
filter {
  if [fields][tag] == "java" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  } # end if
}

kafkainput.conf
input {
  kafka {
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat"]
    topics => ["ql-prod-dubbo", "ql-prod-nginx", "ql-prod-tomcat"]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }
}
#input {
#  kafka {
#    bootstrap_servers => "172.16.11.68:9092"
#    topics => ["ql-prod-java-dubbo", "ql-prod", "ql-prod-java"]
#    codec => "json"
#    consumer_threads => 15
#    decorate_events => true
#    auto_offset_reset => "latest"
#    group_id => "logstash-1"
#    ############################# HELK Optimizing Latency #############################
#    fetch_min_bytes => "1"
#    request_timeout_ms => "305000"
#    ############################# HELK Optimizing Availability #############################
#    session_timeout_ms => "10000"
#    max_poll_records => "550"
#    max_poll_interval_ms => "300000"
#  }
#}

nginx.conf
filter {
  if [fields][tag] == "nginx-access" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # keep the last path element (the file name) as src
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  } # end if
}

output.conf
output {
  if [fields][tag] == "nginx-access" {
    stdout {}
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.67:9200"]
      index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
    }
  }
  if [fields][tag] == "java" {
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.66:9200", "172.16.11.68:9200"]
      index => "%{[host][name]}-%{[src]}"
    }
  }
}
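
These per-concern files (input, filters, output) only take effect if loaded together as one pipeline; Logstash's -f flag accepts a directory and concatenates every file in it, so assuming they all live in a conf.d directory (that path is an assumption), something like:
./logstash -f /home/elk/logstash/conf.d/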
