====== Add elastic repo ======
  helm repo add elastic https://helm.elastic.co
====== Install Elasticsearch ======
===== Create namespace =====
  kubectl create ns elasticsearch
===== Deploy Elasticsearch from helm =====
  helm install --namespace elasticsearch --name elasticsearch elastic/elasticsearch
===== Create Persistent Volume for Elasticsearch =====
Create a directory on the host for the volume data:
  sudo mkdir /...
  sudo chmod a+rw -R /...
<code yaml>
apiVersion: v1
kind: PersistentVolume
metadata:
  name: elasticsearch-data
  labels:
    app: elasticsearch-master
spec:
  capacity:
    storage: 32Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/..."                           # the directory created above
    type: Directory
  persistentVolumeReclaimPolicy: Retain    # value truncated in the source; Retain assumed
</code>
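A minimal apply-and-check sequence (the manifest file name ''elasticsearch-pv.yaml'' is my own):
<code bash>
kubectl apply -f elasticsearch-pv.yaml
# the PV should show up as Available, then Bound once the chart's PVC claims it
kubectl get pv elasticsearch-data
kubectl get pvc -n elasticsearch
</code>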
Grant write access to the indices:
  kubectl exec -it -n elasticsearch elasticsearch-master-0 -- curl -XPUT -H "Content-Type: application/json" http://localhost:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}'
Since I have only one node, I need to prevent replicas from being created; otherwise the cluster will simply stop working later:
  kubectl exec -it -n elasticsearch elasticsearch-master-0 -- curl -H "Content-Type: application/json" -XPUT http://localhost:9200/_template/all -d '{"index_patterns": ["*"], "settings": {"number_of_replicas": 0}}'
And clean out the indices that have already been created:
  kubectl exec -it -n elasticsearch elasticsearch-master-0 -- curl -XDELETE 'http://localhost:9200/_all'
Now the following line will appear in the **elasticsearch-master-0** pod log:
  Cluster health status changed from [YELLOW] to [GREEN]
and logs can be written to this elasticsearch.
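The same status can also be queried directly from the cluster health API, using the same ''kubectl exec'' pattern as above:
<code bash>
kubectl exec -it -n elasticsearch elasticsearch-master-0 -- \
  curl 'http://localhost:9200/_cluster/health?pretty'
# "status" : "green" and "unassigned_shards" : 0 confirm the replica fix worked
</code>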
====== Install Kibana ======
  kubectl create ns kibana
  helm install --namespace elasticsearch --name kibana elastic/kibana

  helm upgrade kibana elastic/kibana -f kibana-values.yaml
(see the **kibana-values.yaml** section below for the values file)
===== Kibana Ingress =====
<code yaml>
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx            # value truncated in the source; nginx assumed
    cert-manager.io/cluster-issuer: letsencrypt   # issuer name truncated in the source
  name: kibana-ingress
  namespace: elasticsearch
spec:
  rules:
  - host: kibana.autosys.tk
    http:
      paths:
      - backend:
          serviceName: kibana-kibana   # default service name generated by the elastic/kibana chart
          servicePort: 5601
        path: /
  tls:
  - hosts:
    - kibana.autosys.tk
    secretName: kibana-autosys-tk-tls
</code>

====== Logstash ======
https://... \\
https://... \\
  helm install --namespace elasticsearch --name logstash elastic/logstash -f logstash_values.yaml

===== Configuring rsyslog and the logstash filter =====
My **rsyslog** spits out log lines in this form:
  Dec 25 14:06:25 kub kubelet[989]: ...
For **logstash** to turn the **message** field into a structured JSON object, a grok filter has to be added to the configuration. \\
The built-in patterns are here: https://github.com/logstash-plugins/logstash-patterns-core \\
Patterns can be tested here: http://grokdebug.herokuapp.com \\
For the line above, this filter works:
  %{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}
To get the logs into **elasticsearch**, add a forwarding rule to the **rsyslog** config (e.g. /etc/rsyslog.conf):
  *.* @<logstash_host>:1514
Here ''*.*'' means all facilities and severities (i.e. every record), ''@'' means UDP delivery (''@@'' would be TCP), and ''<logstash_host>'' is the address of the logstash service.
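A test record can be generated without waiting for real traffic; ''logger'' from util-linux can send straight to the logstash port (''<logstash_host>'' is a placeholder, as above):
<code bash>
# send a single test message over UDP to the logstash syslog input
logger --server <logstash_host> --port 1514 --udp "test message from logger"
</code>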
===== logstash_values.yaml =====
<code yaml>
replicas: 1

# Allows you to add any config files in /usr/share/logstash/config/
# such as logstash.yml and log4j2.properties
logstashConfig:
  logstash.yml: |
    http.host: "0.0.0.0"
    config.reload.automatic: true     # value truncated in the source; "true" assumed
    xpack.management.enabled: false   # value truncated in the source; "false" assumed
#    key:
#      nestedkey: value
#  log4j2.properties: |
#    key = value

# Allows you to add any pipeline files in /usr/share/logstash/pipeline/
logstashPipeline:
  input_main.conf: |
    input {
      udp {
        port => 1514
        type => syslog
      }
      tcp {
        port => 1514
        type => syslog
      }
      http {
        port => 8080
      }
      # kafka {
      #   ## ref: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html
      #   bootstrap_servers => "..."
      #   codec => json { charset => "UTF-8" }
      #   topics => ["..."]
      #   type => "..."
      # }
    }
  filter_main.conf: |
    filter {
      if [type] == "syslog" {
        # Uses built-in Grok patterns to parse this standard format
        grok {
          match => {
            "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}"
          }
        }
        # Sets the timestamp of the event to the timestamp recorded in the log-data
        # By default, logstash sets the timestamp to the time it was ingested.
        #date {
        #  match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
        #}
        mutate {
          rename => ["syslog_message", "message"]
        }
      }
    }
  output_main.conf: |
    output {
      # stdout { codec => rubydebug }
      elasticsearch {
        hosts => ["http://elasticsearch-master:9200"]   # default service of the elastic/elasticsearch chart
        manage_template => false
        index => "logstash-%{+YYYY.MM.dd}"              # index name truncated in the source; logstash default assumed
      }
      # kafka {
      #   ## ref: https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html
      #   ...
      # }
    }

# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
  - name: "..."     # names/values truncated in the source
    value: "..."
  - name: "..."
    value: "..."
#  - name: MY_ENVIRONMENT_VAR
#    value: the_value_goes_here

# A list of secrets and their paths to mount inside the pod
secretMounts: []

image: "docker.elastic.co/logstash/logstash"
imageTag: "7.6.2"    # tag truncated in the source; 7.6.x was current at the time
imagePullPolicy: "IfNotPresent"
imagePullSecrets: []

podAnnotations: {}

# additionals labels
labels:
  app: logstash

logstashJavaOpts: "-Xmx1g -Xms1g"

resources:
  requests:
    cpu: "100m"
    memory: "1536Mi"
  limits:
    cpu: "1000m"
    memory: "1536Mi"

volumeClaimTemplate:
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 1Gi

rbac:
  create: false
  serviceAccountName: ""

podSecurityPolicy:
  create: false
  name: ""
  spec:
    privileged: true
    fsGroup:
      rule: RunAsAny
    runAsUser:
      rule: RunAsAny
    seLinux:
      rule: RunAsAny
    supplementalGroups:
      rule: RunAsAny
    volumes:
      - secret
      - configMap
      - persistentVolumeClaim

persistence:
  enabled: false
  annotations: {}

extraVolumes: []
#  - name: extras
#    emptyDir: {}

extraVolumeMounts: []
#  - name: extras
#    mountPath: /usr/share/extras
#    readOnly: true

extraContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

extraInitContainers: []
#  - name: do-something
#    image: busybox
#    command: ['do', 'something']

# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""

# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"

# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"

# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}

# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"

httpPort: 9600

updateStrategy: RollingUpdate

# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1

podSecurityContext:
  fsGroup: 1000
  runAsUser: 1000

securityContext:
  capabilities:
    drop:
      - ALL
  # readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# How long to wait for logstash to stop gracefully
terminationGracePeriod: 120

livenessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 300
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 1

readinessProbe:
  httpGet:
    path: /
    port: http
  initialDelaySeconds: 60
  periodSeconds: 10
  timeoutSeconds: 5
  failureThreshold: 3
  successThreshold: 3

## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""

nodeSelector: {}
tolerations: []

nameOverride: ""
fullnameOverride: ""

lifecycle: {}
#  preStop:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the preStop handler"]
#  postStart:
#    exec:
#      command: ["/bin/sh", "-c", "echo Hello from the postStart handler"]

service:
  annotations: {}
  type: LoadBalancer
  ports:
    - name: beats
      port: 5044
      protocol: TCP
      targetPort: 5044
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
    - name: syslog
      port: 1514
      targetPort: 1514
</code>
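After editing the pipeline, the release can be re-rolled with the same values file (''logstash-logstash'' is the statefulset name the chart generates for this release):
<code bash>
helm upgrade logstash elastic/logstash -f logstash_values.yaml
kubectl rollout status statefulset/logstash-logstash -n elasticsearch
</code>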
+ | |||
+ | ===== Create Persistent Volume for LogStash ===== | ||
+ | sudo mkdir / | ||
+ | sudo chmod a+rw -R / | ||
+ | < | ||
+ | kind: PersistentVolume | ||
+ | metadata: | ||
+ | name: logstash-data | ||
+ | | ||
+ | | ||
+ | app: logstash | ||
+ | spec: | ||
+ | capacity: | ||
+ | storage: 2Gi | ||
+ | accessModes: | ||
+ | - ReadWriteOnce | ||
+ | hostPath: | ||
+ | path: "/ | ||
+ | type: Directory | ||
+ | persistentVolumeReclaimPolicy: | ||
+ | |||
====== Verifying the ELK stack ======
Now we can check that the whole chain works. \\
Send a message to **logstash** via the **http input**:
  curl -XPUT 'http://<logstash_host>:8080/test' -d 'hello world'
The response should be **ok**. \\
In **kibana**, open **Discover** and the test message should show up there.
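To confirm on the elasticsearch side that an index was created and received the event:
<code bash>
kubectl exec -it -n elasticsearch elasticsearch-master-0 -- \
  curl 'http://localhost:9200/_cat/indices?v'
# a logstash-* index with docs.count > 0 should be listed
</code>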
====== Filebeat ======
To work with log files you need an additional service, **Filebeat**, which tails the files and ships the events to **logstash**. An input section like this goes into ''filebeat.yml'':
<code yaml>
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/nginx/*.log   # path truncated in the source; nginx logs assumed from the "type" field
  fields:
    type: nginx
  fields_under_root: true
  scan_frequency: 10s        # value truncated in the source; 10s is the filebeat default

output.logstash:
  hosts: ["<logstash_host>:5044"]
</code>

====== Security setup ======

By default the **ELK** stack provides no secure access to the data and the components are not authenticated in any way. That is, anyone can write and read the data. \\
The basic free license does not allow authenticating users against **LDAP** directories, so you either pay or build something from the parts that are available.

===== LDAP authentication in Kibana and RBAC for elasticsearch using oidc and a proxy =====
The general idea:
  * Users have access only to **kibana**.
  * In front of **Kibana** sits an **oidc-proxy** (**openresty**) that authenticates users via **OIDC** against **Keycloak** (federated with **LDAP**) and passes the user name and group list to **Kibana** in HTTP headers.
  * **Kibana** adds the selected headers to its requests to **elasticsearch**.
  * Requests from **kibana** to **elasticsearch** go through a second **proxy**, which reads the group list from the headers and proxies only requests whose URI and HTTP method are allowed for that group.
Components used:
  * **Active Directory**
  * **Keycloak**
  * **openresty** + **lua-resty-openidc**
Here are the links that helped:
  - https://...
  - https://...
  - https://...
  - https://...
  - https://...

I'm reinventing my own wheels here, but there are ready-made solutions:
  * an **openresty** image with **oidc** - https://...
  * a solution for **elasticsearch** authentication against **LDAP** - https://...

==== Keycloak ====
Deployed with **helm** - [[devops:...]] \\
The **REALM** is configured like this: https://...
==== Building the openresty image with the lua-resty-openidc module ====
**Dockerfile** based on https://... (this follows the stock openresty-on-alpine build; fragments lost in the export are restored from that template):
<code dockerfile>
# base image line truncated in the source; alpine is implied by the apk commands below
FROM alpine
MAINTAINER Mikhail Usik <...>

ENV LUA_SUFFIX=jit-2.1.0-beta3 \
    LUAJIT_VERSION=2.1 \
    NGINX_PREFIX=/opt/openresty/nginx \
    OPENRESTY_PREFIX=/opt/openresty \
    OPENRESTY_SRC_SHA256=bf92af41d3ad22880047a8b283fc213d59c7c1b83f8dae82e50d14b64d73ac38 \
    OPENRESTY_VERSION=1.15.8.2 \
    LUAROCKS_VERSION=3.1.3 \
    LUAROCKS_SRC_SHA256=c573435f495aac159e34eaa0a3847172a2298eb6295fcdc35d565f9f9b990513 \
    LUA_RESTY_OPENIDC_VERSION=1.7.2-1 \
    VAR_PREFIX=/var/nginx

RUN set -ex \
 && apk --no-cache add \
        libgcc \
        libpcrecpp \
        libpcre16 \
        libpcre32 \
        libssl1.1 \
        libstdc++ \
        openssl \
        pcre \
        curl \
        unzip \
        git \
        dnsmasq \
        ca-certificates \
 && apk --no-cache add --virtual .build-dependencies \
        make \
        musl-dev \
        gcc \
        ncurses-dev \
        openssl-dev \
        pcre-dev \
        perl \
        readline-dev \
        zlib-dev \
        libc-dev \
    \
## OpenResty
 && curl -fsSL https://openresty.org/download/openresty-${OPENRESTY_VERSION}.tar.gz -o /tmp/openresty.tar.gz \
    \
 && cd /tmp \
 && echo "${OPENRESTY_SRC_SHA256} *openresty.tar.gz" | sha256sum -c - \
 && tar -xzf openresty.tar.gz \
    \
 && cd openresty-* \
 && readonly NPROC=$(grep -c ^processor /proc/cpuinfo 2>/dev/null || 1) \
 && ./configure \
        --prefix=${OPENRESTY_PREFIX} \
        --http-client-body-temp-path=${VAR_PREFIX}/client_body_temp \
        --http-proxy-temp-path=${VAR_PREFIX}/proxy_temp \
        --http-log-path=${VAR_PREFIX}/access.log \
        --error-log-path=${VAR_PREFIX}/error.log \
        --pid-path=${VAR_PREFIX}/nginx.pid \
        --lock-path=${VAR_PREFIX}/nginx.lock \
        --with-luajit \
        --with-pcre-jit \
        --with-ipv6 \
        --with-http_ssl_module \
        --without-http_ssi_module \
        --with-http_realip_module \
        --without-http_scgi_module \
        --without-http_uwsgi_module \
        --without-http_userid_module \
        -j${NPROC} \
 && make -j${NPROC} \
 && make install \
    \
 && rm -rf /tmp/openresty* \
    \
## LuaRocks
 && curl -fsSL http://luarocks.org/releases/luarocks-${LUAROCKS_VERSION}.tar.gz -o /tmp/luarocks.tar.gz \
    \
 && cd /tmp \
 && echo "${LUAROCKS_SRC_SHA256} *luarocks.tar.gz" | sha256sum -c - \
 && tar -xzf luarocks.tar.gz \
    \
 && cd luarocks-* \
 && ./configure \
        --prefix=${OPENRESTY_PREFIX}/luajit \
        --lua-suffix=${LUA_SUFFIX} \
        --with-lua=${OPENRESTY_PREFIX}/luajit \
        --with-lua-lib=${OPENRESTY_PREFIX}/luajit/lib \
        --with-lua-include=${OPENRESTY_PREFIX}/luajit/include/luajit-${LUAJIT_VERSION} \
 && make build \
 && make install \
    \
 && rm -rf /tmp/luarocks* \
 && rm -rf ~/.cache/luarocks \
## Post install
 && ln -sf ${NGINX_PREFIX}/sbin/nginx /usr/local/bin/nginx \
 && ln -sf ${NGINX_PREFIX}/sbin/nginx /usr/local/bin/openresty \
 && ln -sf ${OPENRESTY_PREFIX}/bin/resty /usr/local/bin/resty \
 && ln -sf ${OPENRESTY_PREFIX}/bin/opm /usr/local/bin/opm \
 && ln -sf ${OPENRESTY_PREFIX}/luajit/bin/luajit-* /usr/local/bin/luajit \
 && ln -sf ${OPENRESTY_PREFIX}/luajit/bin/luajit-* /usr/local/bin/lua \
 && ln -sf ${OPENRESTY_PREFIX}/luajit/bin/luarocks /usr/local/bin/luarocks \
 && echo user=root >> /etc/dnsmasq.conf \
## Install lua-resty-openidc
 && cd ~/ \
# Fix for https://...
# && luarocks install lua-resty-hmac \
 && luarocks install lua-resty-openidc ${LUA_RESTY_OPENIDC_VERSION} \
## Install lua-resty-xacml-pep
# && curl -fsSL https://... \
## Cleanup
 && apk del .build-dependencies 2>/dev/null

WORKDIR $NGINX_PREFIX

CMD dnsmasq; openresty -g "daemon off; error_log /dev/stderr info;"
</code>

==== ConfigMap for openresty ====
Based on https://...
\\
Two virtual servers are defined (both are proxies). \\
  * The first authenticates users for **kibana** (via **OIDC**).
  * The second authorizes requests from **kibana** to **elasticsearch**: it checks the headers (the user's group list) in requests from **Kibana** and permits only certain methods when accessing **elasticsearch**.
<code yaml>
apiVersion: v1
kind: ConfigMap
metadata:
  name: openresty-oidc-config
  namespace: elasticsearch
data:
  nginx.conf: |
    worker_processes  auto;            # value truncated in the source
    events {
      worker_connections  1024;
    }
    http {
      include            mime.types;
      default_type       application/octet-stream;
      sendfile           on;
      keepalive_timeout  65;
      gzip on;
      ##
      # LUA options
      ##
      lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;   # path truncated; alpine CA bundle assumed
      lua_package_path '~/lua/?.lua;;';                                 # value truncated in the source
      resolver 192.168.77.1;
      # cache for discovery metadata documents
      lua_shared_dict discovery 1m;
      # cache for JWKs
      lua_shared_dict jwks 1m;
      # allow the server to close connection on non responding client, this will free up memory
      reset_timedout_connection on;

      server {
        listen 80 default_server;
        server_name kibana.autosys.tk;
        #access_log /dev/stdout;
        #error_log /dev/stdout info;
        access_by_lua '
          local opts = {
            redirect_uri = "/redirect_uri",
            accept_none_alg = true,
            discovery = "https://<keycloak_host>/auth/realms/<realm>/.well-known/openid-configuration",
            client_id = "<client_id>",
            client_secret = "<client_secret>",
            redirect_uri_scheme = "https",
            logout_path = "/logout",
            redirect_after_logout_uri = "https://<keycloak_host>/auth/realms/<realm>/protocol/openid-connect/logout",
            redirect_after_logout_with_id_token_hint = false,
            session_contents = {id_token=true}
          }
          -- call introspect for OAuth 2.0 Bearer Access Token validation
          local res, err = require("resty.openidc").authenticate(opts)
          if err then
            ngx.status = 403
            ngx.say(err)
            ngx.exit(ngx.HTTP_FORBIDDEN)
          end
          -- set data from the ID token as HTTP Request headers
          ngx.req.set_header("x-auth-username", res.id_token.preferred_username)
          ngx.req.set_header("x-auth-groups", res.id_token.groups)
          -- other claims can be forwarded the same way, e.g.:
          -- ngx.req.set_header("X-USER", res.id_token.sub)
          -- ngx.req.set_header("X-EMAIL", res.id_token.email)
          -- Output headers to nginx err log
          -- ngx.log(ngx.ERR, "Authenticated user: "..res.id_token.preferred_username)
        ';
        expires           0;
        add_header        Cache-Control private;
        location / {
          proxy_connect_timeout 5s;
          proxy_pass http://kibana-kibana:5601;   # name truncated; default elastic/kibana chart service assumed
        }
      }
      server {
        listen 9200;
        access_log /dev/stdout;
        error_log /dev/stdout info;
        access_by_lua '
          -- Per-group ACL: URI pattern -> allowed HTTP methods.
          -- The concrete patterns were truncated in the source; the ones below follow
          -- the well-known nginx/lua example from the elastic.co blog
          -- ("Playing HTTP Tricks with Nginx") and are illustrative.
          local restrictions = {
            elasticsearch_ro = {
              ["^/$"]                      = { "GET", "HEAD" },
              ["^/?[^/]*/_search"]         = { "GET", "POST" },
              ["^/?[^/]*/_msearch"]        = { "GET", "POST" },
              ["^/?[^/]*/_mapping"]        = { "GET" },
              ["^/?[^/]*/_validate/query"] = { "GET", "POST" },
              ["^/?[^/]*/_doc/.*"]         = { "GET" },
              ["/_aliases"]                = { "GET" },
              ["/_cluster.*"]              = { "GET" }
            },
            elasticsearch_rw = {
              ["^/$"]                      = { "GET", "HEAD" },
              ["^/?[^/]*/_search"]         = { "GET", "POST" },
              ["^/?[^/]*/_msearch"]        = { "GET", "POST" },
              ["^/?[^/]*/_bulk"]           = { "GET", "POST" },
              ["^/?[^/]*/_doc/.*"]         = { "GET", "POST", "PUT" },
              ["/_aliases"]                = { "GET" },
              ["/_cluster.*"]              = { "GET" }
            },
            elasticsearch_full = {
              ["^/$"]                      = { "GET", "HEAD" },
              ["^/?[^/]*/_search"]         = { "GET", "POST" },
              ["^/?[^/]*/_msearch"]        = { "GET", "POST" },
              ["^/?[^/]*/_bulk"]           = { "GET", "POST" },
              ["^/?[^/]*/_doc/.*"]         = { "GET", "POST", "PUT", "DELETE" },
              ["^/?[^/]*$"]                = { "GET", "POST", "PUT", "DELETE" },
              ["/_.*"]                     = { "GET", "POST", "PUT", "DELETE" }
            }
          }
          local groups = ngx.req.get_headers()["x-auth-groups"]
          local authenticated_group = nil
          local ngx_re = require "ngx.re"
          if ( type(groups) == "string" ) then
            groups = string.lower(groups)
            ngx.log(ngx.ERR, "Groups: "..groups)
            local groups_table = ngx_re.split(groups, ",")
            local groups_number = table.getn(groups_table)
            ngx.log(ngx.ERR, "Groups number: "..groups_number)
            for i=1,groups_number do
              -- strip spaces and the leading "/" that keycloak puts in group paths
              local group = groups_table[i]:gsub("%s+", "")
              group = group:gsub("/", "")
              ngx.log(ngx.ERR, "Checking group: "..group)
              if (restrictions[group] ~= nil) then
                ngx.log(ngx.ERR, "Matched group: "..group)
                authenticated_group = group
                break
              end
            end
          end
          -- exit 403 when no matching role has been found
          if authenticated_group == nil then
            ngx.header.content_type = "text/plain"
            ngx.log(ngx.ERR, "No group matches an elasticsearch role")
            ngx.status = 403
            ngx.say("403 Forbidden: no group matches an elasticsearch role")
            return ngx.exit(403)
          end

          -- get URL
          local uri = ngx.var.uri
          ngx.log(ngx.DEBUG, uri)
          -- get method
          local method = ngx.req.get_method()
          ngx.log(ngx.DEBUG, method)

          local allowed = false

          for path, methods in pairs(restrictions[authenticated_group]) do

            -- path matched rules?
            local p = string.match(uri, path)

            local m = nil

            -- method matched rules?
            for _, _method in pairs(methods) do
              m = m and m or string.match(method, _method)
            end

            if p and m then
              allowed = true
              ngx.log(ngx.NOTICE, method.." "..uri.." matched: "..tostring(m).." "..tostring(path))
              break
            end
          end

          if not allowed then
            ngx.header.content_type = "text/plain"
            ngx.log(ngx.WARN, "Group "..authenticated_group.." is not allowed to access "..method.." "..uri)
            ngx.status = 403
            ngx.say("403 Forbidden: check your role")
            return ngx.exit(403)
          end
        ';
        location / {
          proxy_connect_timeout 15s;
          proxy_pass http://elasticsearch-master:9200;   # name truncated; chart default service assumed
        }
      }
    }
</code>
In the first proxy's configuration, authentication is performed by the **access_by_lua** block. The following is set in **local opts**:
  * **redirect_uri** - will always be **"/redirect_uri"**; this path is handled by **lua-resty-openidc** itself.
  * **discovery** - URL of the realm configuration. Usually you only need to substitute the **keycloak** host address and the realm name.
  * **client_id** - defined in **keycloak** when the client is created.
  * **client_secret** - generated in **keycloak**.
  * **redirect_uri_scheme** - http or https.
  * **logout_path** - will always be **"/logout"**; requesting this path terminates the session.

In the second proxy's configuration, authorization is likewise performed by an **access_by_lua** block:
  * The **local restrictions = {** block contains the tables (named after the groups) listing the URIs and HTTP methods allowed for each group.
  * In this algorithm the groups received from **keycloak** are lower-cased and stripped of spaces and "/" characters before being matched against the table names. After editing the ACL, re-apply the config as shown below.
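A sketch of rolling out a changed ACL (the manifest file name is my own; ''kubectl rollout restart'' requires kubernetes >= 1.15):
<code bash>
kubectl apply -f openresty-oidc-configmap.yaml
# restart the proxy so openresty re-reads the mounted nginx.conf
kubectl rollout restart deployment/openresty-oidc -n elasticsearch
</code>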
==== openresty-oidc_deployment.yaml ====
<code yaml>
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openresty-oidc
  namespace: elasticsearch
spec:
  replicas: 1
  selector:
    matchLabels:
      app: openresty-oidc
  template:
    metadata:
      labels:
        app: openresty-oidc
    spec:
      imagePullSecrets:
      - name: autosys-regcred
      containers:
      - name: openresty-oidc
        image: registry.autosys.tk/...                       # image path truncated in the source
        volumeMounts:
        - name: openresty-oidc-config-volume
          mountPath: /opt/openresty/nginx/conf/nginx.conf    # matches NGINX_PREFIX from the Dockerfile
          subPath: nginx.conf
      volumes:
      - name: openresty-oidc-config-volume
        configMap:
          name: openresty-oidc-config
</code>
+ | |||
+ | ==== openresty-oidc-service ==== | ||
+ | < | ||
+ | kind: Service | ||
+ | metadata: | ||
+ | name: openresty-oidc-http | ||
+ | namespace: elasticsearch | ||
+ | spec: | ||
+ | ports: | ||
+ | - name: http | ||
+ | port: 80 | ||
+ | protocol: TCP | ||
+ | targetPort: 80 | ||
+ | - name: elasticsearch | ||
+ | port: 9200 | ||
+ | protocol: TCP | ||
+ | targetPort: 9200 | ||
+ | selector: | ||
+ | app: openresty-oidc | ||
+ | sessionAffinity: | ||
+ | type: ClusterIP | ||
+ | </ | ||
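A quick in-cluster smoke test of the authorizing proxy; the group header below imitates what the first proxy would set after a successful login:
<code bash>
kubectl run curl-test -n elasticsearch --rm -it --restart=Never \
  --image=curlimages/curl -- \
  curl -s -H "x-auth-groups: elasticsearch_ro" http://openresty-oidc-http:9200/
# GET / with a known group should return the usual elasticsearch banner;
# the same request without the header must be answered with 403
</code>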
==== kibana-values.yaml ====
The following changes need to be made to the **kibana** configuration via **helm upgrade**:
  * Enable **elasticsearch.requestHeadersWhitelist** so that kibana passes the auth headers through to elasticsearch.
  * Set **elasticsearchHosts** to the address of the authorizing **proxy**.
<code yaml>
elasticsearchHosts: "http://openresty-oidc-http:9200"   # the authorizing proxy service

kibanaConfig:
  kibana.yml: |
    server.name: kibana                                 # value truncated in the source
    server.host: "0.0.0.0"                              # value truncated; chart default assumed
    xpack.monitoring.ui.container.elasticsearch.enabled: false   # value truncated; "false" assumed
    elasticsearch.requestHeadersWhitelist:
      - authorization
      - x-auth-groups
      - x-auth-username
</code>

==== kibana-ingress.yaml ====
The external ingress now routes to the OIDC proxy instead of kibana directly, so that all user traffic passes through authentication:
<code yaml>
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt   # issuer name truncated in the source
    kubernetes.io/ingress.class: nginx            # value truncated in the source
  name: kibana-ingress
  namespace: elasticsearch
spec:
  rules:
  - host: kibana.autosys.tk
    http:
      paths:
      - backend:
          serviceName: openresty-oidc-http   # name truncated; the OIDC proxy service assumed
          servicePort: 80
        path: /
  tls:
  - hosts:
    - kibana.autosys.tk
    secretName: kibana-autosys-tk-tls
</code>

==== RBAC using Oidc proxy ====
  - On the one hand, you can keep extending the **OIDC**-proxy privilege table. Drawbacks: the table becomes large and complicated, and every change means editing and re-rolling the proxy config.
  - On the other hand, the OIDC-proxy code could use the **ES API** to check whether the user exists in the **ES** database, create it if missing, and keep its roles in sync with the groups from the token; see the sketch after the table below.

API - https://... \\
Roles - https://... \\
^ API ^ RO ^ RW ^ FULL ^
| /_cluster | GET | GET | GET PUT POST DELETE |
| /_cat | GET | GET | GET |
| /_nodes | GET | GET | GET POST |
| /_remote | GET | GET | GET |
| /... | | | |
| /_ccr | GET | GET | GET |
| * | GET | GET | GET PUT |
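A sketch of the second approach using the elasticsearch security API (endpoint names from the official docs; it needs a cluster with native security enabled, so here it is illustrative only, and ''logs_ro''/''jdoe'' are made-up names):
<code bash>
# create (or update) a role limited to reading logstash indices
curl -XPOST "http://elasticsearch-master:9200/_security/role/logs_ro" \
  -H "Content-Type: application/json" \
  -d '{"indices": [{"names": ["logstash-*"], "privileges": ["read"]}]}'

# create a user carrying that role; the proxy could do this on first login
curl -XPOST "http://elasticsearch-master:9200/_security/user/jdoe" \
  -H "Content-Type: application/json" \
  -d '{"password": "changeme", "roles": ["logs_ro"]}'
</code>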
====== Miscellaneous problems ======
===== Random deauthentication of clients by the openresty oidc proxy =====
I was plagued by a situation where a logged-in user would randomly lose the session and get bounced back to re-authentication.

==== Cause ====
The cause and the solution are described here: https://... \\
Apparently the cause lies in the session encryption/decryption machinery, specifically in the session key. If **$session_secret** is not set explicitly, each nginx **worker** generates its own, and a session issued by one worker cannot be decrypted by another. \\
The solution is either to add a 32-byte **$session_secret** to the **server** section:<code>
...
set $session_secret T62DGscdGyb4So4tLsXhNIRdlEpt4J2k;</code>
or to use a single **worker**.
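A fresh secret of the right length can be generated with openssl:
<code bash>
# 16 random bytes hex-encoded = 32 characters
openssl rand -hex 16
</code>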
+ | |||
+ | |||
+ | |||
+ | ===== Elasticsearch operator и OSS docker images ===== | ||
+ | При попытке использовать имиджи **Elasticsearch OSS 7.6.2**, для разворачивания кластера средствами **elasticsearch operator** все поды всегда уходили в бесконечный **InitError**.\\ | ||
+ | ==== Диагностика ==== | ||
+ | Смотрим describe любого пода и видим:< | ||
+ | Type | ||
+ | ---- | ||
+ | Normal | ||
+ | Normal | ||
+ | Normal | ||
+ | Normal | ||
+ | Warning | ||
+ | Смотрим логи контейнера **elastic-internal-init-filesystem** и видим:< | ||
+ | unsupported_distribution</ | ||
+ | Легкое гугление приводит нас сюда: https:// | ||
+ | available with the basic (free), gold and platinum licenses in order to ensure that | ||
+ | all clusters launched are secured by default. | ||
+ | |||
+ | A check is done in the prepare-fs script by looking at the existence of the | ||
+ | Elastic License. If not present, the script exit with a custom exit code. | ||
+ | |||
+ | Then the ES reconcilation loop sends an event of type warning if it detects that | ||
+ | a prepare-fs init container terminated with this exit code.</ | ||
+ | |||
+ | ===== Ошибки SSL ===== | ||
+ | Кластер развернут с помощью **elasticsearch operator 1.0.1**. | ||
+ | < | ||
+ | 19: | ||
+ | java.security.cert.CertificateException: | ||
+ | .... | ||
+ | .... | ||
+ | .... | ||
+ | SSL connection to https:// | ||
+ | Please check the elasticsearch SSL settings under xpack.security.http.ssl. | ||
+ | |||
+ | ERROR: Failed to establish SSL connection to elasticsearch at https:// | ||
+ | command terminated with exit code 78</ | ||
+ | |||
+ | В кластере единственный master. При попытке подключения утилита обращается по IP-адресу, | ||
+ | Как написано тут: https:// | ||
+ | < | ||
+ | xpack.security.http.ssl.verification_mode: | ||
+ | |||
===== Errors during the initial password setup with bin/elasticsearch-setup-passwords =====
Running **bin/elasticsearch-setup-passwords** produced:<code>
Failed to authenticate user 'elastic' against ...
Possible causes include:
 * The password for the 'elastic' user has already been changed on this cluster
 * Your elasticsearch node is running against a different keystore
   This tool used the keystore at /usr/share/elasticsearch/config/elasticsearch.keystore</code>
This means the passwords have already been set and are stored in the cluster secrets. They can be read like this:
  kubectl get secrets elasticsearch-es-internal-users -o=jsonpath='{.data}'
and
  kubectl get secret elasticsearch-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 -d
If the **elasticsearch-es-internal-users** and **elasticsearch-es-elastic-user** secrets are created before the **elasticsearch** cluster instance itself, the operator picks up the passwords from them instead of generating new ones. \\
In any case, the **elasticsearch operator** should soon allow setting passwords for the built-in accounts directly in the config.