Deployment
Main docker-compose.yml
version: "2.2" services: #证书生成逻辑 create_certs: image: docker.elastic.co/elasticsearch/elasticsearch:${
STACK_VERSION} volumes: - /opt/module/docker-compose/elk-prod/certs:/usr/share/elasticsearch/config/certs user: "0" container_name: es_create_certs command: > bash -c ' if [ x${ELASTIC_PASSWORD} == x ]; then echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; exit 1; elif [ x${KIBANA_PASSWORD} == x ]; then echo "Set the KIBANA_PASSWORD environment variable in the .env file"; exit 1; fi; if [[ ! -f config/certs/ca.zip ]]; then bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; unzip config/certs/ca.zip -d config/certs; bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; unzip config/certs/certs.zip -d config/certs; fi; echo "Setting file permissions" chown -R 1000:0 /certs echo "Waiting for Elasticsearch availability"; until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; echo "Setting kibana_system password"; until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${
ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{
\"password\":\"${
KIBANA_PASSWORD}\"}" | grep -q "^{
}"; do sleep 10; done; echo "All done!"; ' healthcheck: test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"] interval: 1s timeout: 5s retries: 120 #es01节点配置 es01: depends_on: create_certs: condition: service_healthy image: docker.elastic.co/elasticsearch/elasticsearch:${
STACK_VERSION} restart: always volumes: - /opt/module/docker-compose/elk-prod/certs:/usr/share/elasticsearch/config/certs - /opt/module/docker-compose/elk-prod/es/plugins:/usr/share/elasticsearch/plugins - /opt/module/docker-compose/elk-prod/es/data/es01:/usr/share/elasticsearch/data ports: - ${
ES_PORT}:9200 environment: - node.name=es02 - cluster.name=${
CLUSTER_NAME} - cluster.initial_master_nodes=es01,es02,es03 - discovery.seed_hosts=es02,es03 - ELASTIC_PASSWORD=${
ELASTIC_PASSWORD} - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - bootstrap.memory_lock=true - xpack.security.enabled=true - xpack.security.http.ssl.enabled=true - xpack.security.http.ssl.key=certs/es01/es01.key - xpack.security.http.ssl.certificate=certs/es01/es01.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es01/es01.key - xpack.security.transport.ssl.certificate=certs/es01/es01.crt - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.verification_mode=certificate ulimits: memlock: soft: -1 hard: -1 healthcheck: test: [ "CMD-SHELL", "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", ] interval: 10s timeout: 10s retries: 120 #es02节点配置 es02: depends_on: - es01 image: docker.elastic.co/elasticsearch/elasticsearch:${
STACK_VERSION} restart: always volumes: - /opt/module/docker-compose/elk-prod/certs:/usr/share/elasticsearch/config/certs - /opt/module/docker-compose/elk-prod/es/plugins:/usr/share/elasticsearch/plugins - /opt/module/docker-compose/elk-prod/es/data/es02:/usr/share/elasticsearch/data environment: - node.name=es02 - cluster.name=${
CLUSTER_NAME} - cluster.initial_master_nodes=es01,es02,es03 - discovery.seed_hosts=es01,es03 - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - xpack.security.enabled=true - xpack.security.http.ssl.enabled=true - xpack.security.http.ssl.key=certs/es02/es02.key - xpack.security.http.ssl.certificate=certs/es02/es02.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es02/es02.key - xpack.security.transport.ssl.certificate=certs/es02/es02.crt - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.verification_mode=certificate ulimits: memlock: soft: -1 hard: -1 healthcheck: test: [ "CMD-SHELL", "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", ] interval: 10s timeout: 10s retries: 120 #es03节点配置 es03: depends_on: - es02 image: docker.elastic.co/elasticsearch/elasticsearch:${
STACK_VERSION} restart: always volumes: - /opt/module/docker-compose/elk-prod/certs:/usr/share/elasticsearch/config/certs - /opt/module/docker-compose/elk-prod/es/plugins:/usr/share/elasticsearch/plugins - /opt/module/docker-compose/elk-prod/es/data/es03:/usr/share/elasticsearch/data environment: - node.name=es03 - cluster.name=${
CLUSTER_NAME} - cluster.initial_master_nodes=es01,es02,es03 - discovery.seed_hosts=es01,es02 - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - xpack.security.enabled=true - xpack.security.http.ssl.enabled=true - xpack.security.http.ssl.key=certs/es03/es03.key - xpack.security.http.ssl.certificate=certs/es03/es03.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es03/es03.key - xpack.security.transport.ssl.certificate=certs/es03/es03.crt - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt - xpack.security.transport.ssl.verification_mode=certificate ulimits: memlock: soft: -1 hard: -1 healthcheck: test: [ "CMD-SHELL", "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", ] interval: 10s timeout: 10s retries: 120 #kibana节点配置 kibana: depends_on: es01: condition: service_healthy es02: condition: service_healthy es03: condition: service_healthy restart: always image: docker.elastic.co/kibana/kibana:${
STACK_VERSION} volumes: - /opt/module/docker-compose/elk-prod/certs:/usr/share/kibana/config/certs - /opt/module/docker-compose/elk-prod/kibana/plugins:/usr/share/kibana/plugins - /opt/module/docker-compose/elk-prod/kibana/data:/usr/share/kibana/data ports: - ${
KIBANA_PORT}:5601 environment: - SERVERNAME=kibana - ELASTICSEARCH_HOSTS=https://es01:9200 - ELASTICSEARCH_USERNAME=kibana_system - ELASTICSEARCH_PASSWORD=${
KIBANA_PASSWORD} - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt healthcheck: test: [ "CMD-SHELL", "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", ] interval: 10s timeout: 10s retries: 120
Parameter notes
healthcheck: runs a command inside the container to check whether it is actually healthy.
depends_on: makes each container wait for the state of the one before it, so the stack comes up in order and problems are easier to isolate; if all three nodes start at once it is hard to tell which one failed.
ES_JAVA_OPTS: pay close attention to the heap size you give Elasticsearch.
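The compose file resolves all of the ${...} variables from a .env file placed next to docker-compose.yml. A minimal sketch (the values below are placeholders, not the ones used in this deployment):

# .env
STACK_VERSION=8.8.0
CLUSTER_NAME=es-cluster
ELASTIC_PASSWORD=changeme-elastic
KIBANA_PASSWORD=changeme-kibana
ES_PORT=9200
KIBANA_PORT=5601
LOGSTASH_PORT=4560

With the .env in place, docker compose up -d brings up the stack: create_certs generates the CA and per-node certificates (its healthcheck then releases es01/es02/es03), waits for the cluster, and finally sets the kibana_system password so Kibana can start.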
instances.yml used by create_certs
instances:
  - name: es01
    dns:
      - es01
      - localhost
    ip:
      - 127.0.0.1
  - name: es02
    dns:
      - es02
      - localhost
    ip:
      - 127.0.0.1
  - name: es03
    dns:
      - es03
      - localhost
    ip:
      - 127.0.0.1
  - name: kibana
    dns:
      - kibana
      - localhost
    ip:
      - 127.0.0.1
  - name: logstash
    dns:
      - logstash
      - localhost
    ip:
      - 127.0.0.1
logstash
I run Logstash from a separate docker-compose file here; you can also merge everything into a single one.
version: "2.2" services: #logstah 配置 logstash: image: logstash:${
STACK_VERSION} restart: always volumes: - /opt/module/docker-compose/elk-prod/certs/logstash:/etc/logstash/config/certs - /opt/module/docker-compose/elk-prod/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml - /opt/module/docker-compose/elk-prod/logstash/config/pipeline:/usr/share/logstash/pipeline - /opt/module/docker-compose/elk-prod/logstash/data:/usr/share/logstash/data - /opt/module/docker-compose/elk-prod/logstash/plugins:/usr/share/logstash/plugins ports: - ${
LOGSTASH_PORT}:4560
logstash.yml — the Logstash user credentials referenced here are set up in Kibana:
node.name: logstash
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: 'password'
xpack.monitoring.elasticsearch.hosts: [ 'https://es01:9200' ]
xpack.monitoring.elasticsearch.ssl.certificate_authority: /etc/logstash/config/certs/ca.crt
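The 'password' placeholder above belongs to the built-in logstash_system monitoring user. Its password can be set in Kibana's user management UI or, as a sketch, through the security API in Dev Tools:

POST /_security/user/logstash_system/_password
{
  "password": "your-logstash-system-password"
}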
Example logstash.conf
input {
  beats {
    port => 5044
    ssl => true
    ssl_key => '/etc/logstash/config/certs/logstash.pkcs8.key'
    ssl_certificate => '/etc/logstash/config/certs/logstash.crt'
  }
  file {
    path => "/usr/share/logstash/data/movies.csv"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}
filter {
  csv {
    separator => ","
    columns => ["id", "content", "genre"]
  }
  mutate {
    split => { "genre" => "|" }
    remove_field => ["path", "host", "@timestamp", "message"]
  }
  mutate {
    split => ["content", "("]
    add_field => { "title" => "%{[content][0]}" }
    add_field => { "year" => "%{[content][1]}" }
  }
  mutate {
    convert => { "year" => "integer" }
    strip => ["title"]
    remove_field => ["path", "host", "@timestamp", "message", "content"]
  }
}
output {
  elasticsearch {
    hosts => ["https://es01:9200"]
    index => "movies-%{+YYYY.MM.dd}"
    document_id => "%{id}"
    cacert => '/etc/logstash/config/certs/ca.crt'
    user => 'logstash_writer'
    password => 'xxx'
  }
  stdout {}
}
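The output block authenticates as logstash_writer, which is not a built-in user. A sketch of creating a matching role and user in Kibana Dev Tools (the role name and privilege list are assumptions, scoped to the movies-* indices written by this pipeline):

POST /_security/role/logstash_writer_role
{
  "cluster": ["monitor", "manage_index_templates"],
  "indices": [
    {
      "names": ["movies-*"],
      "privileges": ["create_index", "create", "write"]
    }
  ]
}

POST /_security/user/logstash_writer
{
  "password": "xxx",
  "roles": ["logstash_writer_role"]
}

Also note that the logstash.key produced by elasticsearch-certutil is typically not in the PKCS#8 format the beats input expects; the logstash.pkcs8.key referenced above can be produced with something like openssl pkcs8 -inform PEM -in logstash.key -topk8 -nocrypt -outform PEM -out logstash.pkcs8.key.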
Data types
{
  "settings": {
    "number_of_shards": 3,
    "number_of_replicas": 2,
    "analysis": {
      "analyzer": {
        "ik_analyzer": {
          "type": "custom",
          "tokenizer": "ik_max_word"
        },
        "ik_smart": {
          "type": "custom",
          "tokenizer": "ik_smart"
        }
      }
    }
  },
  "aliases": {
    "knowledge_detail": {}
  },
  "mappings": {
    "properties": {
      "system_from": { "type": "keyword" },
      "author_code": { "type": "keyword" },
      "article_author": {
        "type": "text",
        "fields": {
          "keyword": { "type": "keyword", "ignore_above": 256 }
        },
        "analyzer": "ik_analyzer",
        "copy_to": "navigation_search"
      },
      "tag": {
        "type": "text",
        "fields": {
          "keyword": { "type": "keyword", "ignore_above": 256 }
        },
        "analyzer": "ik_analyzer",
        "copy_to": "navigation_search"
      },
      "article_title": {
        "type": "text",
        "analyzer": "ik_analyzer",
        "copy_to": "navigation_search"
      },
      "article_info": {
        "type": "text",
        "analyzer": "ik_analyzer",
        "copy_to": "navigation_search"
      },
      "navigation_search": {
        "type": "text",
        "analyzer": "ik_analyzer"
      },
      "is_private": { "type": "integer", "null_value": 0 },
      "hit_count": { "type": "integer", "null_value": 0 },
      "collect_count": { "type": "integer", "null_value": 0 },
      "created_time": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss" },
      "updated_time": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss" }
    }
  }
}
text
text literally means full-text content; see how the article_author field is configured here:
"article_author": {
"type": "text", "fields": {
"keyword": {
"type": "keyword", "ignore_above": 256 } }, "analyzer": "ik_analyzer", "copy_to": "navigation_search" }
Installing the IK analyzer
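The compose file mounts /opt/module/docker-compose/elk-prod/es/plugins into every node's /usr/share/elasticsearch/plugins, so one way to install IK is to unpack the plugin release into that host directory before starting the cluster. A sketch only; the version and download URL below are assumptions and must match STACK_VERSION — check the elasticsearch-analysis-ik releases page for the real link:

cd /opt/module/docker-compose/elk-prod/es/plugins
mkdir ik && cd ik
# hypothetical version; must equal the Elasticsearch version in .env
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v8.8.0/elasticsearch-analysis-ik-8.8.0.zip
unzip elasticsearch-analysis-ik-8.8.0.zip && rm elasticsearch-analysis-ik-8.8.0.zip

After the nodes restart, the analyzer can be verified with an _analyze call: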
POST _analyze { "text":"Tom & Viv", "analyzer": "ik_smart" }
Vector search
Food for thought: what is vector search, and why use it? (A minimal mapping and query sketch follows the list below.)
1. Search quality: we want the search engine to understand the meaning behind the text we type, not just match the literal words.
2. Input modality: input is no longer limited to text; images and other media can serve as the query.
3. Relevance: results should stay related to the query's subject area and its surrounding context.
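As a preview — a minimal sketch, assuming Elasticsearch 8.x dense_vector/kNN support; the index name, field names, and 4-dimensional vectors are purely illustrative (real embeddings come from a model and have hundreds of dimensions):

PUT vector_demo
{
  "mappings": {
    "properties": {
      "title":        { "type": "text", "analyzer": "ik_max_word" },
      "title_vector": { "type": "dense_vector", "dims": 4, "index": true, "similarity": "cosine" }
    }
  }
}

POST vector_demo/_search
{
  "knn": {
    "field": "title_vector",
    "query_vector": [0.1, 0.2, 0.3, 0.4],
    "k": 5,
    "num_candidates": 50
  }
}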
To be continued.