diff --git a/roles/gpaas/efk/elasticsearch/defaults/main.yml b/roles/gpaas/efk/elasticsearch/defaults/main.yml
deleted file mode 100644
index 66572ed..0000000
--- a/roles/gpaas/efk/elasticsearch/defaults/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-elasticsearch_image: toyangdon/elasticsearch-oss:7.10.2
-alpine_image: library/alpine:3.6
-elasticsearch_indices_cleanup_image: toyangdon/es-index-cleaner:v0.1
-
-es_data_path: >-
-  {{data_dir}}/es
diff --git a/roles/gpaas/efk/elasticsearch/tasks/container.yml b/roles/gpaas/efk/elasticsearch/tasks/container.yml
deleted file mode 100644
index 9a741b5..0000000
--- a/roles/gpaas/efk/elasticsearch/tasks/container.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-- name: Create the es-statefulset.yaml file
-  template: src=es-statefulset.yaml dest={{ base_dir }}/manifests/es-statefulset.yaml
-  delegate_to: localhost
-  run_once: true
-  when: not ansible_check_mode
-
-- name: Deploy es-statefulset
-  shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/es-statefulset.yaml"
-  delegate_to: localhost
-  run_once: true
-
-- name: Create the es-indices-cleanup.yaml file
-  template: src=es-indices-cleanup.yaml dest={{ base_dir }}/manifests/es-indices-cleanup.yaml
-  delegate_to: localhost
-  run_once: true
-
-- name: Deploy es-index-clean-job
-  shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/es-indices-cleanup.yaml"
-  delegate_to: localhost
-  run_once: true
diff --git a/roles/gpaas/efk/elasticsearch/tasks/main.yml b/roles/gpaas/efk/elasticsearch/tasks/main.yml
deleted file mode 100644
index 08ce51d..0000000
--- a/roles/gpaas/efk/elasticsearch/tasks/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-- name: Create the data directory
-  file: name={{es_data_path}}/data state=directory mode=0755 owner=1000 group=1000 recurse=yes
-
-- name: Create the log directory
-  file: name={{es_data_path}}/logs state=directory mode=0755 owner=1000 group=1000 recurse=yes
-
-#- name: Modify the system file limits
-#  sysctl:
-#    name: vm.max_map_count
-#    value: 655360
-#    sysctl_set: yes
-#    state: present
-#    reload: yes
-
-- include_tasks: container.yml
-
-#- name: Wait for the cloud-service to start successfully
-#  shell: "{{bin_dir}}/kubectl get statefulset elasticsearch -n kube-system -o jsonpath='{.status.readyReplicas}'"
-#  register: status_result
-#  until: 'groups["elasticsearch"]|length|string == status_result.stdout'
-#  retries: 50
-#  delay: 5
-#  run_once: true
-
-#- name: Reboot the es node; deploying elasticsearch nodes currently hits a network issue, and a reboot recovers it
-#  shell: shutdown -r
-
-#- name: Wait for connection
-#  wait_for_connection: delay=60
diff --git a/roles/gpaas/efk/elasticsearch/templates/es-indices-cleanup.yaml b/roles/gpaas/efk/elasticsearch/templates/es-indices-cleanup.yaml
deleted file mode 100644
index 725a9dd..0000000
--- a/roles/gpaas/efk/elasticsearch/templates/es-indices-cleanup.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-apiVersion: batch/v1beta1
-kind: CronJob
-metadata:
-  name: es-index-cleaner
-  namespace: kube-system
-spec:
-  # Run at 01:03 every day
-  schedule: "3 1 */1 * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-          - name: es-index-cleaner
-            image: {{BASE_IMAGE_URL}}/{{elasticsearch_indices_cleanup_image}}
-          restartPolicy: OnFailure
-  successfulJobsHistoryLimit: 3
-  failedJobsHistoryLimit: 3
diff --git a/roles/gpaas/efk/elasticsearch/templates/es-statefulset.yaml b/roles/gpaas/efk/elasticsearch/templates/es-statefulset.yaml
deleted file mode 100644
index 6760fcb..0000000
--- a/roles/gpaas/efk/elasticsearch/templates/es-statefulset.yaml
+++ /dev/null
@@ -1,248 +0,0 @@
-# RBAC authn and authz
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: elasticsearch-logging
-  namespace: kube-system
-  labels:
-    k8s-app: elasticsearch-logging
-    addonmanager.kubernetes.io/mode: Reconcile
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: elasticsearch-logging
-  labels:
-    k8s-app: elasticsearch-logging
-    addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - "services"
-  - "namespaces"
-  - "endpoints"
-  verbs:
-  - "get"
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  namespace: kube-system
-  name: elasticsearch-logging
-  labels:
-    k8s-app: elasticsearch-logging
-    addonmanager.kubernetes.io/mode: Reconcile
-subjects:
-- kind: ServiceAccount
-  name: elasticsearch-logging
-  namespace: kube-system
-  apiGroup: ""
-roleRef:
-  kind: ClusterRole
-  name: elasticsearch-logging
-  apiGroup: ""
----
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: es-config
-  namespace: kube-system
-data:
-  elasticsearch.yml: |
-    path.data: {{es_data_path}}/data
-    path.logs: {{es_data_path}}/logs
-    #http.host: 0.0.0.0
-    http.cors.enabled: true
-    http.cors.allow-origin: "*"
-    network.host: 0.0.0.0
-    node.name: ${HOSTNAME}.elasticsearch
-    cluster.initial_master_nodes: [{%- for node in groups['elasticsearch'] -%}"elasticsearch-{{loop.index - 1}}.elasticsearch"{% if not loop.last %},{% endif %}{%- endfor -%}]
-    cluster.name: elasticsearch
-    discovery.zen.ping.unicast.hosts: [{%- for node in groups['elasticsearch'] -%}"elasticsearch-{{loop.index - 1}}.elasticsearch"{% if not loop.last %},{% endif %}{%- endfor -%}]
-    discovery.zen.minimum_master_nodes: {{(groups['elasticsearch']|length) // 2 + 1}}
-    discovery.zen.ping_timeout: 5s
-
----
----
-# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: elasticsearch
-  namespace: kube-system
-  labels:
-    k8s-app: elasticsearch
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-  serviceName: elasticsearch
-  replicas: {{groups['elasticsearch']|length}}
-  selector:
-    matchLabels:
-      k8s-app: elasticsearch
-  template:
-    metadata:
-      labels:
-        k8s-app: elasticsearch
-        kubernetes.io/cluster-service: "true"
-    spec:
-#      hostNetwork: true
-      tolerations:
-{% for taint in node_taints.split(",") %}
-      - operator: "Exists"
-        effect: "NoSchedule"
-        key: {{taint|replace("=:NoSchedule","")}}
-{% endfor %}
-      nodeSelector:
-        "node.kubernetes.io/elasticsearch": "true"
-      affinity:
-#        nodeAffinity:
-#          requiredDuringSchedulingIgnoredDuringExecution:
-#            nodeSelectorTerms:
-#            - matchExpressions:
-#              - key: node-role.kubernetes.io/es
-#                operator: Exists
        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-          - labelSelector:
-              matchExpressions:
-              - key: k8s-app
-                operator: In
-                values:
-                - elasticsearch
-            topologyKey: kubernetes.io/hostname
-      containers:
-      - image: "{{BASE_IMAGE_URL}}/{{ elasticsearch_image }}"
-        name: elasticsearch
-        resources:
-          # need more cpu upon initialization, therefore burstable class
-          limits:
-            cpu: 4
-            memory: "6Gi"
-          requests:
-            cpu: 2
-            memory: 3Gi
-        ports:
-        - containerPort: 9200
-          name: db
-          protocol: TCP
-        - containerPort: 9300
-          name: transport
-          protocol: TCP
-        livenessProbe:
-          tcpSocket:
-            port: transport
-          initialDelaySeconds: 30
-          timeoutSeconds: 10
-        readinessProbe:
-          tcpSocket:
-            port: transport
-          initialDelaySeconds: 30
-          timeoutSeconds: 10
-        volumeMounts:
-        - name: es-config
-          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
-          subPath: elasticsearch.yml
-        - name: es-persistent-storage
-          mountPath: /data/es
-        env:
-        - name: "NAMESPACE"
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: "ES_JAVA_OPTS"
-          value: "-Xms3g -Xmx3g"
-        securityContext:
-          privileged: true
-      volumes:
-      - name: es-config
-        configMap:
-          name: es-config
-          items:
-          - key: elasticsearch.yml
-            path: elasticsearch.yml
-      initContainers:
-      - image: {{BASE_IMAGE_URL}}/{{ alpine_image }}
-        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
-        name: elasticsearch-init
-        securityContext:
-          privileged: true
-  volumeClaimTemplates:
-  - metadata:
-      name: es-persistent-storage
-    spec:
-      accessModes:
-      - ReadWriteOnce
-      resources:
-        requests:
-          storage: 100Gi
-      storageClassName: local-storage
-      selector:
-        matchLabels:
-          pv: elasticsearch
----
-kind: Service
-apiVersion: v1
-metadata:
-  labels:
-    elastic-app: elasticsearch-service
-  name: elasticsearch-service
-  namespace: kube-system
-spec:
-  ports:
-  - port: 9200
-    name: db
-    targetPort: 9200
-  - port: 9300
-    name: transport
-    targetPort: 9300
-  selector:
-    k8s-app: elasticsearch
----
-kind: Service
-apiVersion: v1
-metadata:
-  labels:
-    elastic-app: elasticsearch-service
-  name: elasticsearch
-  namespace: kube-system
-spec:
-  ports:
-  - port: 9200
-    name: db
-    targetPort: 9200
-  - port: 9300
-    name: transport
-    targetPort: 9300
-  selector:
-    k8s-app: elasticsearch
-  clusterIP: None
-
----
-{% for node in groups['elasticsearch'] %}
----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: es-persistent-storage-{{hostvars[node].NODE_ID}}
-  labels:
-    pv: elasticsearch
-spec:
-  accessModes:
-  - ReadWriteOnce
-  capacity:
-    storage: 100Gi
-  local:
-    path: {{es_data_path}}
-  nodeAffinity:
-    required:
-      nodeSelectorTerms:
-      - matchExpressions:
-        - key: kubernetes.io/hostname
-          operator: In
-          values:
-          - "{{hostvars[node].NODE_ID}}"
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: local-storage
-{% endfor %}
diff --git a/roles/gpaas/efk/fluentd/defaults/main.yml b/roles/gpaas/efk/fluentd/defaults/main.yml
deleted file mode 100644
index d7f2574..0000000
--- a/roles/gpaas/efk/fluentd/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-fluentd_image: toyangdon/fluentd-es-image-arm:1.12.0
diff --git a/roles/gpaas/efk/fluentd/tasks/container.yml b/roles/gpaas/efk/fluentd/tasks/container.yml
deleted file mode 100644
index 2a185b7..0000000
--- a/roles/gpaas/efk/fluentd/tasks/container.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: Create the fluentd.yaml file
-  template: src=fluentd.yaml dest={{ base_dir }}/manifests/fluentd.yaml
-  delegate_to: localhost
-
-- name: Deploy fluentd.yaml
-  shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/fluentd.yaml -n kube-system"
-  delegate_to: localhost
diff --git a/roles/gpaas/efk/fluentd/tasks/main.yml b/roles/gpaas/efk/fluentd/tasks/main.yml
deleted file mode 100644
index dffa6bc..0000000
--- a/roles/gpaas/efk/fluentd/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-- include_tasks: container.yml
\ No newline at end of file
diff --git a/roles/gpaas/efk/fluentd/templates/fluentd.yaml b/roles/gpaas/efk/fluentd/templates/fluentd.yaml
deleted file mode 100644
index 630f41a..0000000
--- a/roles/gpaas/efk/fluentd/templates/fluentd.yaml
+++ /dev/null
@@ -1,481 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: fluentd-es
-  labels:
-    k8s-app: fluentd-es
-    addonmanager.kubernetes.io/mode: Reconcile
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: fluentd-es
-  labels:
-    k8s-app: fluentd-es
-    addonmanager.kubernetes.io/mode: Reconcile
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - "namespaces"
-  - "pods"
-  verbs:
-  - "get"
-  - "watch"
-  - "list"
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: fluentd-es
-  labels:
-    k8s-app: fluentd-es
-    addonmanager.kubernetes.io/mode: Reconcile
-subjects:
-- kind: ServiceAccount
-  name: fluentd-es
-  namespace: kube-system
-  apiGroup: ""
-roleRef:
-  kind: ClusterRole
-  name: fluentd-es
-  apiGroup: ""
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: fluentd-es-v3.1.1
-  labels:
-    k8s-app: fluentd-es
-    version: v3.1.1
-    addonmanager.kubernetes.io/mode: Reconcile
-spec:
-  selector:
-    matchLabels:
-      k8s-app: fluentd-es
-      version: v3.1.1
-  template:
-    metadata:
-      labels:
-        k8s-app: fluentd-es
-        version: v3.1.1
-    spec:
-      priorityClassName: system-node-critical
-      serviceAccountName: fluentd-es
-      containers:
-      - name: fluentd-es
-        image: {{BASE_IMAGE_URL}}/{{fluentd_image}}
-        env:
-        - name: FLUENTD_ARGS
-          value: --no-supervisor -q
-        resources:
-          limits:
-            memory: 500Mi
-          requests:
-            cpu: 100m
-            memory: 200Mi
-        volumeMounts:
-        - name: varlog
-          mountPath: /var/log
-        - name: datalog
-          mountPath: /data/log
-        - name: varlibdockercontainers
-          mountPath: /data/docker/containers
-          readOnly: true
-        - name: config-volume
-          mountPath: /etc/fluent/config.d
-        ports:
-        - containerPort: 24231
-          name: prometheus
-          protocol: TCP
-        livenessProbe:
-          tcpSocket:
-            port: prometheus
-          initialDelaySeconds: 5
-          timeoutSeconds: 10
-        readinessProbe:
-          tcpSocket:
-            port: prometheus
-          initialDelaySeconds: 5
-          timeoutSeconds: 10
-      terminationGracePeriodSeconds: 30
-      volumes:
-      - name: varlog
-        hostPath:
-          path: /var/log
-      - name: datalog
-        hostPath:
-          path: /data/log
-      - name: varlibdockercontainers
-        hostPath:
-          path: /data/docker/containers
-      - name: config-volume
-        configMap:
-          name: fluentd-es-config-v0.2.1
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: fluentd-es-config-v0.2.1
-  labels:
-    addonmanager.kubernetes.io/mode: Reconcile
-data:
-  system.conf: |-
-    <system>
-      root_dir /tmp/fluentd-buffers/
-    </system>
-
-  containers.input.conf: |-
-    # This configuration file for Fluentd / td-agent is used
-    # to watch changes to Docker log files. The kubelet creates symlinks that
-    # capture the pod name, namespace, container name & Docker container ID
-    # to the docker logs for pods in the /var/log/containers directory on the host.
-    # If running this fluentd configuration in a Docker container, the /var/log
-    # directory should be mounted in the container.
-    #
-    # These logs are then submitted to Elasticsearch which assumes the
-    # installation of the fluent-plugin-elasticsearch & the
-    # fluent-plugin-kubernetes_metadata_filter plugins.
-    # See https://github.com/uken/fluent-plugin-elasticsearch &
-    # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
-    # more information about the plugins.
-    #
-    # Example
-    # =======
-    # A line in the Docker log file might look like this JSON:
-    #
-    # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
-    #  "stream":"stderr",
-    #   "time":"2014-09-25T21:15:03.499185026Z"}
-    #
-    # The time_format specification below makes sure we properly
-    # parse the time format produced by Docker. This will be
-    # submitted to Elasticsearch and should appear like:
-    # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
-    # ...
-    # {
-    #      "_index" : "logstash-2014.09.25",
-    #      "_type" : "fluentd",
-    #      "_id" : "VBrbor2QTuGpsQyTCdfzqA",
-    #      "_score" : 1.0,
-    #      "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
-    #                 "stream":"stderr","tag":"docker.container.all",
-    #                 "@timestamp":"2014-09-25T22:45:50+00:00"}
-    #  },
-    # ...
-    #
-    # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
-    # record & add labels to the log record if properly configured. This enables users
-    # to filter & search logs on any metadata.
-    # For example a Docker container's logs might be in the directory:
-    #
-    #  /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
-    #
-    # and in the file:
-    #
-    #  997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
-    #
-    # where 997599971ee6... is the Docker ID of the running container.
-    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
-    # in the /var/log/containers directory which includes the pod name and the Kubernetes
-    # container name:
-    #
-    #    synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
-    #    ->
-    #    /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
-    #
-    # The /var/log directory on the host is mapped to the /var/log directory in the container
-    # running this instance of Fluentd and we end up collecting the file:
-    #
-    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
-    #
-    # This results in the tag:
-    #
-    #  var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
-    #
-    # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
-    # which are added to the log message as a kubernetes field object & the Docker container ID
-    # is also added under the docker field object.
-    # The final tag is:
-    #
-    #   kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
-    #
-    # And the final log record look like:
-    #
-    # {
-    #   "log":"2014/09/25 21:15:03 Got request with path wombat\n",
-    #   "stream":"stderr",
-    #   "time":"2014-09-25T21:15:03.499185026Z",
-    #   "kubernetes": {
-    #     "namespace": "default",
-    #     "pod_name": "synthetic-logger-0.25lps-pod",
-    #     "container_name": "synth-lgr"
-    #   },
-    #   "docker": {
-    #     "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
-    #   }
-    # }
-    #
-    # This makes it easier for users to search for logs by pod name or by
-    # the name of the Kubernetes container regardless of how many times the
-    # Kubernetes pod has been restarted (resulting in a several Docker container IDs).
-    # Json Log Example:
-    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
-    # CRI Log Example:
-    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
-    <source>
-      @id fluentd-containers.log
-      @type tail
-      path /var/log/containers/*.log
-      pos_file /var/log/es-containers.log.pos
-      tag raw.kubernetes.*
-      read_from_head true
-      <parse>
-        @type multi_format
-        <pattern>
-          format json
-          time_key time
-          time_format %Y-%m-%dT%H:%M:%S.%NZ
-        </pattern>
-        <pattern>
-          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
-          time_format %Y-%m-%dT%H:%M:%S.%N%:z
-        </pattern>
-      </parse>
-    </source>
-
-    # Detect exceptions in the log output and forward them as one log entry.
-    <match raw.kubernetes.**>
-      @id raw.kubernetes
-      @type detect_exceptions
-      remove_tag_prefix raw
-      message log
-      stream stream
-      multiline_flush_interval 5
-      max_bytes 500000
-      max_lines 1000
-    </match>
-
-    # Concatenate multi-line logs
-    <filter **>
-      @id filter_concat
-      @type concat
-      key message
-      multiline_end_regexp /\n$/
-      separator ""
-    </filter>
-
-    # Enriches records with Kubernetes metadata
-    <filter kubernetes.**>
-      @id filter_kubernetes_metadata
-      @type kubernetes_metadata
-    </filter>
-
-    # Fixes json fields in Elasticsearch
-    <filter kubernetes.**>
-      @id filter_parser
-      @type parser
-      key_name log
-      reserve_data true
-      remove_key_name_field true
-      <parse>
-        @type multi_format
-        <pattern>
-          format json
-        </pattern>
-        <pattern>
-          format none
-        </pattern>
-      </parse>
-    </filter>
-
-  system.input.conf: |-
-    # Example:
-    # 2015-12-21 23:17:22,066 [salt.state       ][INFO    ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
-    <source>
-      @id minion
-      @type tail
-      format /^(?