Для бэкапа в AWS-подобное (S3-совместимое) хранилище надо:
Для того чтобы прописать ключи, можно использовать файл elasticsearch.yml или, если ES развернут оператором ECK, с помощью спецификации указать на секрет, где хранятся ключи:
# ECK Elasticsearch spec fragment: wire keystore entries from a Secret.
# Every key of the referenced Secret becomes an entry in the Elasticsearch
# keystore (equivalent to `elasticsearch-keystore add`).
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
spec:
  secureSettings:
    - secretName: s3-backup-keys
А в секрете, собственно, прописать ключи:
# Secret holding the S3 credentials for the snapshot repository.
# Key names match the elasticsearch.yml / keystore setting names for the
# s3 client named "default"; values are base64-encoded (type: Opaque).
apiVersion: v1
kind: Secret
metadata:
  name: s3-backup-keys
type: Opaque
data:
  s3.client.default.access_key: 0L/Ri9GJLdC/0YvRiSAtYgo=
  s3.client.default.secret_key: 0L4t0LvQvi3Qu9C+IC1iCg==
То есть ключи секрета - представляют собой параметры, аналогичные прописываемым в elasticsearch.yml. В данном случае - это параметры для s3.client с именем default
Скрипт для создания снапшотов по расписанию может выглядеть так:
"""Create an Elasticsearch snapshot in an S3-compatible repository.

Registers (idempotently) an S3 snapshot repository named "s3-backup",
takes a timestamped snapshot and prints every snapshot currently in the
repository. Intended to be run on a schedule (cron / Kubernetes CronJob).
"""
import argparse
from datetime import datetime

from elasticsearch import Elasticsearch


def main(args):
    """Register the S3 repository, create a snapshot, list snapshots.

    :param args: parsed CLI arguments (connection, auth and S3 settings).
    """
    es = Elasticsearch(
        hosts=args.es_url,
        http_auth=(args.es_login, args.es_password),
        url_prefix=args.es_url_prefix,
    )
    print(es.snapshot.get_repository())
    # Repository settings, see:
    # https://www.elastic.co/guide/en/elasticsearch/reference/current/repository-s3.html
    # https://kubedb.com/docs/v2021.09.30/guides/elasticsearch/plugins-backup/s3-repository/
    # https://elasticsearch-py.readthedocs.io/en/v8.2.3/api.html#elasticsearch.client.SnapshotClient.create_repository
    # https://elasticsearch-py.readthedocs.io/en/v8.2.3/api.html#elasticsearch.client.SnapshotClient.get_repository
    repo_definition = {
        "type": "s3",
        "settings": {
            "endpoint": args.s3_endpoint,
            "bucket": args.s3_bucket_name,
            "base_path": args.base_path,
            "region": args.s3_region,
        },
    }
    # Re-registering an existing repository with the same settings is safe,
    # so repeated scheduled runs do not need a pre-existence check.
    es.snapshot.create_repository(repository="s3-backup", body=repo_definition)
    snapshot_name = "snapshot_" + datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
    es.snapshot.create(repository="s3-backup", snapshot=snapshot_name)
    snapshots = es.snapshot.get(snapshot="*", repository="s3-backup")
    for snapshot in snapshots["snapshots"]:
        print(snapshot["snapshot"])


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--es_url', default='https://es.domail.local:443')
    parser.add_argument('--es_login', default='elastic')
    parser.add_argument('--es_password', default='superpassword')
    parser.add_argument('--es_url_prefix', default='es')
    parser.add_argument('--base_path', default='dev')
    parser.add_argument('--s3_bucket_name', default='es-backup')
    parser.add_argument('--s3_endpoint', default='https://storage.yandexcloud.net')
    parser.add_argument('--s3_region', default='ru-central1')
    args = parser.parse_args()
    main(args)
Предварительно в кластере, куда мы мигрируем, нужно разрешить миграцию из заданного источника. В случае использования оператора ECK это делается так:
# ECK Elasticsearch spec fragment: allow reindex-from-remote from the
# source cluster. verification_mode: none disables TLS certificate checks
# for the remote — acceptable for a one-off migration, not for production.
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
spec:
  nodeSets:
    - config:
        reindex.remote.whitelist: "SOURCE_ES_IP:9200"
        reindex.ssl.verification_mode: none
И дальше — просто воспользоваться модулем elasticsearch для Python и скриптом:
"""Migrate all indices between two Elasticsearch clusters via remote reindex.

Lists every index on the source cluster and, for each one, asks the
DESTINATION cluster to pull it with the reindex-from-remote API (the
destination must whitelist the source via reindex.remote.whitelist).
"""
import argparse

from elasticsearch import Elasticsearch


def main(args):
    """Reindex every source index into the destination cluster.

    :param args: parsed CLI arguments with source/destination URLs and
        credentials.
    """
    es_source = Elasticsearch(
        hosts=args.source_es_url,
        http_auth=(args.source_es_login, args.source_es_password),
    )
    # TLS verification is disabled on the destination — fine for a one-off
    # migration against a self-signed cluster, not for production use.
    es_dest = Elasticsearch(
        hosts=args.dest_es_url,
        http_auth=(args.dest_es_login, args.dest_es_password),
        ca_certs=False,
        verify_certs=False,
    )
    for index in es_source.indices.get('*'):
        print(index)
        # The destination cluster pulls the data itself: the "remote"
        # section tells it where and how to reach the source.
        es_dest.reindex({
            "source": {
                "remote": {
                    "host": args.source_es_url,
                    "username": args.source_es_login,
                    "password": args.source_es_password,
                },
                "index": index,
                "query": {
                    "match_all": {}
                },
            },
            "dest": {
                "index": index
            },
        })
    print('#################################################################')
    print(es_dest.indices.get('*'))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_es_url', default='http://SOURCE_ES:9200')
    parser.add_argument('--source_es_login', default='elastic')
    parser.add_argument('--source_es_password', default='password')
    parser.add_argument('--dest_es_url', default='http://DESTINATION_ES:9200')
    parser.add_argument('--dest_es_login', default='elastic')
    parser.add_argument('--dest_es_password', default='password')
    args = parser.parse_args()
    main(args)
curl http://~~~elastic_client_IP~~~/_aliases?pretty=true
Или вот так, с basic auth:
AUTH=elastic:AG7dGmsZp2NDpRED curl -k -u $AUTH https://elasticsearch-es-http.default.svc.cluster.local:9200/_aliases?pretty=true
curl http://~~~elastic_client_IP~~~/_cat/indices?pretty=true
или запрос в DevTools в Kibana
GET /_cat/indices?v
while read -r line; do echo $line | awk '{print $10}'; done <<< `curl -k -u elastic:mSSZ6epX2TQqGE4W https://10.110.63.47:9200/_cat/indices?bytes=b`
https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html
curl -X POST "localhost:9200/_security/user/jacknich?pretty" -H 'Content-Type: application/json' -d' { "password" : "j@rV1s", "roles" : [ "admin", "other_role1" ], "full_name" : "Jack Nicholson", "email" : "jacknich@example.com", "metadata" : { "intelligence" : 7 } } '
https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html
curl -X GET "localhost:9200/_security/user/jacknich?pretty"
[logstash.outputs.elasticsearch] retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];"})
Нужно снять блокировку «read-only» и вернуть права на запись в индексы Elasticsearch:
curl -XPUT -H "Content-Type: application/json" http://~~~IP~address~elasticsearch~client~~~:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}'
{"type":"log","@timestamp":"2020-05-22T09:20:30Z","tags":["info","savedobjects-service"],"pid":6,"message":"Creating index .kibana_1."} {"type":"log","@timestamp":"2020-05-22T09:21:02Z","tags":["warning","savedobjects-service"],"pid":6,"message":"Unable to connect to Elasticsearch. Error: [resource_already_exists_exception] index [.kibana_1/N5lCo69cSEiaGPrH9nPgmQ] already exists, with { index_uuid=\"N5lCo69cSEiaGPrH9nPgmQ\" & index=\".kibana_1\" }"} {"type":"log","@timestamp":"2020-05-22T09:21:02Z","tags":["warning","savedobjects-service"],"pid":6,"message":"Another Kibana instance appears to be migrating the index. Waiting for that migration to complete. If no other Kibana instance is attempting migrations, you can get past this message by deleting index .kibana_1 and restarting Kibana."} {"type":"log","@timestamp":"2020-05-22T09:21:35Z","tags":["info","savedobjects-service"],"pid":6,"message":"Creating index .kibana_task_manager_1."} {"type":"log","@timestamp":"2020-05-22T09:21:35Z","tags":["warning","savedobjects-service"],"pid":6,"message":"Unable to connect to Elasticsearch. Error: [resource_already_exists_exception] index [.kibana_task_manager_1/57SevyHNSW2u0pDBMxS6rg] already exists, with { index_uuid=\"57SevyHNSW2u0pDBMxS6rg\" & index=\".kibana_task_manager_1\" }"} {"type":"log","@timestamp":"2020-05-22T09:21:35Z","tags":["warning","savedobjects-service"],"pid":6,"message":"Another Kibana instance appears to be migrating the index. Waiting for that migration to complete. If no other Kibana instance is attempting migrations, you can get past this message by deleting index .kibana_task_manager_1 and restarting Kibana."}
Нужно удалить индексы:
kubectl exec elasticsearch-kb-74b55648bb-l5pf8 -- curl -k -u elastic:AG7dGmsZp2NDpRED -XDELETE http://elasticsearch-openresty-oidc-http.default.svc.cluster.local:9200/.kibana_1
или в более новых версиях:
kubectl exec -it -n elk elastic-kb-84dddb9b5-k4kwg -- curl -k -u elastic:MWtBKGDF2qnKr889 -XDELETE https://elastic-es-http.elk.svc.cluster.local:9200/.kibana_task_manager_1