From 6b3cce57c925d3786c5bd7a29e1e70d1274b120a Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Aug 2023 15:32:44 +0800 Subject: [PATCH] update --- group_vars/all | 10 +- hosts | 31 +- manifests/calico.yaml | 455 + manifests/coredns.yaml | 180 + manifests/es-indices-cleanup.yaml | 18 + manifests/es-statefulset.yaml | 268 + manifests/fluentd.yaml | 481 + manifests/glusterfs/gluster-storageclass.yaml | 13 + manifests/glusterfs/glusterfs-daemonset.yaml | 157 + manifests/glusterfs/heketi-desployment.yaml | 132 + manifests/glusterfs/topology.json | 53 + manifests/kibana.yaml | 67 + manifests/kube-proxy.yaml | 110 + manifests/kubernetes-dashboard.yaml | 320 + manifests/local-storage.yaml | 6 + manifests/metrics-server.yaml | 143 + manifests/openvpn.yaml | 256 + manifests/prometheus/@ | 14 + .../prometheus/alertmanager-alertmanager.yaml | 18 + .../prometheus/alertmanager-pvc.yaml.bak | 9 + manifests/prometheus/alertmanager-secret.yaml | 8 + .../prometheus/alertmanager-service.yaml | 18 + .../alertmanager-serviceAccount.yaml | 5 + .../alertmanager-serviceMonitor.yaml | 14 + .../grafana-dashboardDatasources.yaml | 8 + .../grafana-dashboardDefinitions.yaml | 38229 ++++++++++++++++ .../prometheus/grafana-dashboardSources.yaml | 21 + manifests/prometheus/grafana-deployment.yaml | 216 + manifests/prometheus/grafana-service.yaml | 16 + .../prometheus/grafana-serviceAccount.yaml | 5 + .../prometheus/grafana-serviceMonitor.yaml | 12 + .../kube-state-metrics-clusterRole.yaml | 91 + ...kube-state-metrics-clusterRoleBinding.yaml | 12 + .../kube-state-metrics-deployment.yaml | 72 + .../prometheus/kube-state-metrics-role.yaml | 30 + .../kube-state-metrics-roleBinding.yaml | 12 + .../kube-state-metrics-service.yaml | 18 + .../kube-state-metrics-serviceAccount.yaml | 5 + .../kube-state-metrics-serviceMonitor.yaml | 30 + .../prometheus/node-exporter-clusterRole.yaml | 17 + .../node-exporter-clusterRoleBinding.yaml | 12 + .../prometheus/node-exporter-daemonset.yaml | 87 + .../prometheus/node-exporter-service.yaml | 15 + .../node-exporter-serviceAccount.yaml | 5 + .../node-exporter-serviceMonitor.yaml | 26 + .../prometheus-adapter-apiService.yaml | 13 + .../prometheus-adapter-clusterRole.yaml | 16 + ...er-clusterRoleAggregatedMetricsReader.yaml | 17 + ...prometheus-adapter-clusterRoleBinding.yaml | 12 + ...s-adapter-clusterRoleBindingDelegator.yaml | 12 + ...us-adapter-clusterRoleServerResources.yaml | 11 + .../prometheus-adapter-configMap.yaml | 33 + .../prometheus-adapter-deployment.yaml | 52 + ...metheus-adapter-roleBindingAuthReader.yaml | 13 + .../prometheus-adapter-service.yaml | 16 + .../prometheus-adapter-serviceAccount.yaml | 5 + .../prometheus/prometheus-clusterRole.yaml | 32 + .../prometheus-clusterRoleBinding.yaml | 12 + .../prometheus-operator-serviceMonitor.yaml | 18 + .../prometheus/prometheus-prometheus.yaml | 33 + .../prometheus-roleBindingConfig.yaml | 13 + ...metheus-roleBindingSpecificNamespaces.yaml | 42 + .../prometheus/prometheus-roleConfig.yaml | 12 + .../prometheus-roleSpecificNamespaces.yaml | 51 + manifests/prometheus/prometheus-rules.yaml | 1206 + manifests/prometheus/prometheus-service.yaml | 19 + .../prometheus/prometheus-serviceAccount.yaml | 5 + .../prometheus/prometheus-serviceMonitor.yaml | 14 + .../prometheus-serviceMonitorApiserver.yaml | 37 + .../prometheus-serviceMonitorCoreDNS.yaml | 19 + ...s-serviceMonitorKubeControllerManager.yaml | 23 + ...rometheus-serviceMonitorKubeScheduler.yaml | 18 + .../prometheus-serviceMonitorKubelet.yaml | 44 + .../setup/0namespace-namespace.yaml 
| 4 + ...0alertmanagerCustomResourceDefinition.yaml | 4629 ++ ...r-0podmonitorCustomResourceDefinition.yaml | 239 + ...r-0prometheusCustomResourceDefinition.yaml | 5541 +++ ...rometheusruleCustomResourceDefinition.yaml | 250 + ...ervicemonitorCustomResourceDefinition.yaml | 346 + .../prometheus-operator-clusterRole.yaml | 73 + ...rometheus-operator-clusterRoleBinding.yaml | 16 + .../setup/prometheus-operator-deployment.yaml | 48 + .../setup/prometheus-operator-service.yaml | 18 + .../prometheus-operator-serviceAccount.yaml | 9 + .../adapter-api/deployment.yaml | 123 + .../adapter-api/service.yaml | 16 + .../proprietary-cloud/app-api/deployment.yaml | 123 + .../proprietary-cloud/app-api/service.yaml | 15 + .../proprietary-cloud/auth/deployment.yaml | 123 + manifests/proprietary-cloud/auth/service.yaml | 15 + .../baremetal/deployment.yaml | 123 + .../proprietary-cloud/baremetal/service.yaml | 15 + .../proprietary-cloud/bbs-api/deployment.yaml | 123 + .../proprietary-cloud/bbs-api/service.yaml | 16 + manifests/proprietary-cloud/biz-data.sql | 972 + .../blog-api/deployment.yaml | 123 + .../proprietary-cloud/blog-api/service.yaml | 15 + .../campaigns-api/deployment.yaml | 123 + .../campaigns-api/service.yaml | 15 + .../charts-api/deployment.yaml | 123 + .../proprietary-cloud/charts-api/service.yaml | 15 + .../cloud-api/deployment.yaml | 123 + .../proprietary-cloud/cloud-api/service.yaml | 15 + .../comment-api/deployment.yaml | 123 + .../comment-api/service.yaml | 15 + manifests/proprietary-cloud/config.sql | 2829 ++ .../daily-api/deployment.yaml | 123 + .../proprietary-cloud/daily-api/service.yaml | 15 + .../database/mysql-cluster.yaml | 224 + .../database/mysql-operator.yaml | 465 + .../mysql.presslabs.org_mysqlbackups.yaml | 88 + .../mysql.presslabs.org_mysqlclusters.yaml | 3736 ++ .../mysql.presslabs.org_mysqldatabases.yaml | 113 + .../mysql.presslabs.org_mysqlusers.yaml | 167 + manifests/proprietary-cloud/db.sql | 10697 +++++ .../proprietary-cloud/edu-api/deployment.yaml | 123 + .../proprietary-cloud/edu-api/service.yaml | 15 + .../proprietary-cloud/elasticsearch.yaml | 129 + .../proprietary-cloud/es-api/deployment.yaml | 123 + .../proprietary-cloud/es-api/service.yaml | 15 + .../file-api/deployment.yaml | 123 + .../proprietary-cloud/file-api/service.yaml | 19 + .../proprietary-cloud/gateway/deployment.yaml | 123 + .../proprietary-cloud/gateway/service.yaml | 15 + manifests/proprietary-cloud/init_config.sql | 25 + .../mall-api/deployment.yaml | 123 + .../proprietary-cloud/mall-api/service.yaml | 15 + .../proprietary-cloud/mcu/deployment.yaml | 123 + manifests/proprietary-cloud/mcu/service.yaml | 15 + manifests/proprietary-cloud/nacos.sql | 220 + manifests/proprietary-cloud/nacos.yaml | 164 + manifests/proprietary-cloud/namespace.yaml | 4 + .../points-api/deployment.yaml | 123 + .../proprietary-cloud/points-api/service.yaml | 15 + .../recruit-api/deployment.yaml | 123 + .../recruit-api/service.yaml | 15 + .../proprietary-cloud/redis-sentinel.yaml | 937 + .../proprietary-cloud/rocketmq/operator.yaml | 48 + .../rocketmq-exporter/rocketmq-exporter.yaml | 137 + .../rocketmq_v1alpha1_broker_crd.yaml | 133 + .../rocketmq_v1alpha1_cluster_service.yaml | 47 + .../rocketmq_v1alpha1_consoles_crd.yaml | 4710 ++ .../rocketmq_v1alpha1_nameservice_crd.yaml | 110 + .../rocketmq_v1alpha1_rocketmq_cluster.yaml | 318 + .../rocketmq_v1alpha1_topictransfer_crd.yaml | 63 + .../proprietary-cloud/rocketmq/role.yaml | 74 + .../rocketmq/role_binding.yaml | 26 + .../rocketmq/service_account.yaml | 19 + 
manifests/proprietary-cloud/skywalking.yaml | 644 + .../user-api/deployment.yaml | 123 + .../proprietary-cloud/user-api/service.yaml | 15 + manifests/proprietary-cloud/user-data.sql | 78 + .../proprietary-cloud/web-ui/configmap.yaml | 105 + .../proprietary-cloud/web-ui/deployment.yaml | 48 + .../proprietary-cloud/web-ui/service.yaml | 15 + manifests/traefik-ingress.yaml | 119 + .../templates/gluster-storageclass.yaml | 2 +- roles/proprietary-cloud/defaults/main.yml | 8 +- roles/proprietary-cloud/tasks/cloud.yml | 9 +- roles/proprietary-cloud/tasks/main.yml | 2 + roles/proprietary-cloud/tasks/nacos.yml | 7 +- .../templates/adapter-api/deployment.yaml | 6 +- .../templates/adapter-api/service.yaml | 9 +- .../templates/app-api/deployment.yaml | 6 +- .../templates/auth/deployment.yaml | 6 +- .../templates/baremetal/deployment.yaml | 6 +- .../templates/bbs-api/deployment.yaml | 6 +- .../templates/bbs-api/service.yaml | 9 +- .../templates/blog-api/deployment.yaml | 6 +- .../templates/campaigns-api/deployment.yaml | 6 +- .../templates/cloud-api/deployment.yaml | 6 +- .../templates/comment-api/deployment.yaml | 6 +- .../templates/daily-api/deployment.yaml | 6 +- .../templates/database/mysql-cluster.yaml | 11 +- .../templates/doc-api/deployment.yaml | 6 +- .../templates/edu-api/deployment.yaml | 6 +- .../templates/es-api/deployment.yaml | 6 +- .../templates/file-api/deployment.yaml | 6 +- .../templates/init_config.sql | 22 + .../templates/mall-api/deployment.yaml | 6 +- .../templates/mcu/deployment.yaml | 4 +- .../templates/mcu/service.yaml | 6 +- .../templates/nacos/config.sql | 2829 ++ .../templates/nacos/nacos.sql | 26 +- .../templates/points-api/deployment.yaml | 6 +- .../templates/recruit-api/deployment.yaml | 6 +- .../templates/user-api/deployment.yaml | 14 +- .../proprietary-cloud/templates/user-data.sql | 12 +- .../templates/web-ui/configmap.yaml | 38 +- 189 files changed, 87880 insertions(+), 156 deletions(-) create mode 100644 manifests/calico.yaml create mode 100644 manifests/coredns.yaml create mode 100644 manifests/es-indices-cleanup.yaml create mode 100644 manifests/es-statefulset.yaml create mode 100644 manifests/fluentd.yaml create mode 100644 manifests/glusterfs/gluster-storageclass.yaml create mode 100644 manifests/glusterfs/glusterfs-daemonset.yaml create mode 100644 manifests/glusterfs/heketi-desployment.yaml create mode 100644 manifests/glusterfs/topology.json create mode 100644 manifests/kibana.yaml create mode 100644 manifests/kube-proxy.yaml create mode 100644 manifests/kubernetes-dashboard.yaml create mode 100644 manifests/local-storage.yaml create mode 100644 manifests/metrics-server.yaml create mode 100644 manifests/openvpn.yaml create mode 100644 manifests/prometheus/@ create mode 100644 manifests/prometheus/alertmanager-alertmanager.yaml create mode 100644 manifests/prometheus/alertmanager-pvc.yaml.bak create mode 100644 manifests/prometheus/alertmanager-secret.yaml create mode 100644 manifests/prometheus/alertmanager-service.yaml create mode 100644 manifests/prometheus/alertmanager-serviceAccount.yaml create mode 100644 manifests/prometheus/alertmanager-serviceMonitor.yaml create mode 100644 manifests/prometheus/grafana-dashboardDatasources.yaml create mode 100644 manifests/prometheus/grafana-dashboardDefinitions.yaml create mode 100644 manifests/prometheus/grafana-dashboardSources.yaml create mode 100644 manifests/prometheus/grafana-deployment.yaml create mode 100644 manifests/prometheus/grafana-service.yaml create mode 100644 
manifests/prometheus/grafana-serviceAccount.yaml create mode 100644 manifests/prometheus/grafana-serviceMonitor.yaml create mode 100644 manifests/prometheus/kube-state-metrics-clusterRole.yaml create mode 100644 manifests/prometheus/kube-state-metrics-clusterRoleBinding.yaml create mode 100644 manifests/prometheus/kube-state-metrics-deployment.yaml create mode 100644 manifests/prometheus/kube-state-metrics-role.yaml create mode 100644 manifests/prometheus/kube-state-metrics-roleBinding.yaml create mode 100644 manifests/prometheus/kube-state-metrics-service.yaml create mode 100644 manifests/prometheus/kube-state-metrics-serviceAccount.yaml create mode 100644 manifests/prometheus/kube-state-metrics-serviceMonitor.yaml create mode 100644 manifests/prometheus/node-exporter-clusterRole.yaml create mode 100644 manifests/prometheus/node-exporter-clusterRoleBinding.yaml create mode 100644 manifests/prometheus/node-exporter-daemonset.yaml create mode 100644 manifests/prometheus/node-exporter-service.yaml create mode 100644 manifests/prometheus/node-exporter-serviceAccount.yaml create mode 100644 manifests/prometheus/node-exporter-serviceMonitor.yaml create mode 100644 manifests/prometheus/prometheus-adapter-apiService.yaml create mode 100644 manifests/prometheus/prometheus-adapter-clusterRole.yaml create mode 100644 manifests/prometheus/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml create mode 100644 manifests/prometheus/prometheus-adapter-clusterRoleBinding.yaml create mode 100644 manifests/prometheus/prometheus-adapter-clusterRoleBindingDelegator.yaml create mode 100644 manifests/prometheus/prometheus-adapter-clusterRoleServerResources.yaml create mode 100644 manifests/prometheus/prometheus-adapter-configMap.yaml create mode 100644 manifests/prometheus/prometheus-adapter-deployment.yaml create mode 100644 manifests/prometheus/prometheus-adapter-roleBindingAuthReader.yaml create mode 100644 manifests/prometheus/prometheus-adapter-service.yaml create mode 100644 manifests/prometheus/prometheus-adapter-serviceAccount.yaml create mode 100644 manifests/prometheus/prometheus-clusterRole.yaml create mode 100644 manifests/prometheus/prometheus-clusterRoleBinding.yaml create mode 100644 manifests/prometheus/prometheus-operator-serviceMonitor.yaml create mode 100644 manifests/prometheus/prometheus-prometheus.yaml create mode 100644 manifests/prometheus/prometheus-roleBindingConfig.yaml create mode 100644 manifests/prometheus/prometheus-roleBindingSpecificNamespaces.yaml create mode 100644 manifests/prometheus/prometheus-roleConfig.yaml create mode 100644 manifests/prometheus/prometheus-roleSpecificNamespaces.yaml create mode 100644 manifests/prometheus/prometheus-rules.yaml create mode 100644 manifests/prometheus/prometheus-service.yaml create mode 100644 manifests/prometheus/prometheus-serviceAccount.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitor.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitorApiserver.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitorCoreDNS.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitorKubeControllerManager.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitorKubeScheduler.yaml create mode 100644 manifests/prometheus/prometheus-serviceMonitorKubelet.yaml create mode 100644 manifests/prometheus/setup/0namespace-namespace.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-0alertmanagerCustomResourceDefinition.yaml create mode 100644 
manifests/prometheus/setup/prometheus-operator-0podmonitorCustomResourceDefinition.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-0prometheusCustomResourceDefinition.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-0servicemonitorCustomResourceDefinition.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-clusterRole.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-clusterRoleBinding.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-deployment.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-service.yaml create mode 100644 manifests/prometheus/setup/prometheus-operator-serviceAccount.yaml create mode 100644 manifests/proprietary-cloud/adapter-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/adapter-api/service.yaml create mode 100644 manifests/proprietary-cloud/app-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/app-api/service.yaml create mode 100644 manifests/proprietary-cloud/auth/deployment.yaml create mode 100644 manifests/proprietary-cloud/auth/service.yaml create mode 100644 manifests/proprietary-cloud/baremetal/deployment.yaml create mode 100644 manifests/proprietary-cloud/baremetal/service.yaml create mode 100644 manifests/proprietary-cloud/bbs-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/bbs-api/service.yaml create mode 100644 manifests/proprietary-cloud/biz-data.sql create mode 100644 manifests/proprietary-cloud/blog-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/blog-api/service.yaml create mode 100644 manifests/proprietary-cloud/campaigns-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/campaigns-api/service.yaml create mode 100644 manifests/proprietary-cloud/charts-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/charts-api/service.yaml create mode 100644 manifests/proprietary-cloud/cloud-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/cloud-api/service.yaml create mode 100644 manifests/proprietary-cloud/comment-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/comment-api/service.yaml create mode 100644 manifests/proprietary-cloud/config.sql create mode 100644 manifests/proprietary-cloud/daily-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/daily-api/service.yaml create mode 100644 manifests/proprietary-cloud/database/mysql-cluster.yaml create mode 100644 manifests/proprietary-cloud/database/mysql-operator.yaml create mode 100644 manifests/proprietary-cloud/database/mysql.presslabs.org_mysqlbackups.yaml create mode 100644 manifests/proprietary-cloud/database/mysql.presslabs.org_mysqlclusters.yaml create mode 100644 manifests/proprietary-cloud/database/mysql.presslabs.org_mysqldatabases.yaml create mode 100644 manifests/proprietary-cloud/database/mysql.presslabs.org_mysqlusers.yaml create mode 100644 manifests/proprietary-cloud/db.sql create mode 100644 manifests/proprietary-cloud/edu-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/edu-api/service.yaml create mode 100644 manifests/proprietary-cloud/elasticsearch.yaml create mode 100644 manifests/proprietary-cloud/es-api/deployment.yaml create mode 100644 manifests/proprietary-cloud/es-api/service.yaml create mode 100644 manifests/proprietary-cloud/file-api/deployment.yaml create mode 100644 
manifests/proprietary-cloud/file-api/service.yaml
 create mode 100644 manifests/proprietary-cloud/gateway/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/gateway/service.yaml
 create mode 100644 manifests/proprietary-cloud/init_config.sql
 create mode 100644 manifests/proprietary-cloud/mall-api/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/mall-api/service.yaml
 create mode 100644 manifests/proprietary-cloud/mcu/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/mcu/service.yaml
 create mode 100644 manifests/proprietary-cloud/nacos.sql
 create mode 100644 manifests/proprietary-cloud/nacos.yaml
 create mode 100644 manifests/proprietary-cloud/namespace.yaml
 create mode 100644 manifests/proprietary-cloud/points-api/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/points-api/service.yaml
 create mode 100644 manifests/proprietary-cloud/recruit-api/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/recruit-api/service.yaml
 create mode 100644 manifests/proprietary-cloud/redis-sentinel.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/operator.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq-exporter/rocketmq-exporter.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_broker_crd.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_cluster_service.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_consoles_crd.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_nameservice_crd.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_rocketmq_cluster.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/rocketmq_v1alpha1_topictransfer_crd.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/role.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/role_binding.yaml
 create mode 100644 manifests/proprietary-cloud/rocketmq/service_account.yaml
 create mode 100644 manifests/proprietary-cloud/skywalking.yaml
 create mode 100644 manifests/proprietary-cloud/user-api/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/user-api/service.yaml
 create mode 100644 manifests/proprietary-cloud/user-data.sql
 create mode 100644 manifests/proprietary-cloud/web-ui/configmap.yaml
 create mode 100644 manifests/proprietary-cloud/web-ui/deployment.yaml
 create mode 100644 manifests/proprietary-cloud/web-ui/service.yaml
 create mode 100644 manifests/traefik-ingress.yaml
 create mode 100644 roles/proprietary-cloud/templates/nacos/config.sql

diff --git a/group_vars/all b/group_vars/all
index 3ff9526..25212e9 100644
--- a/group_vars/all
+++ b/group_vars/all
@@ -69,8 +69,8 @@ node_taints: ""
 
 #本地镜像仓库端口
 registry_port: 6550
-BASE_IMAGE_URL: >-
-  {{ groups['registry'][0]}}:{{registry_port}}
+BASE_IMAGE_URL: dev-docker-registry.ccyunchina.com
+
 
 #docker insecure registry 如果有多个用逗号分开
 docker_insecure_registry: >-
@@ -90,10 +90,10 @@ deploy_offline: true
 deploy_docker_registry: true
 
 #是否加载镜像
-load_images: true
+load_images: false
 
 #是否推送镜像
-push_images: true
+push_images: false
 
 
 #工具镜像(证书)
@@ -104,4 +104,4 @@ gw_tools_image: toyangdon/gw-tools:1.1
 cfssl_cmd: "cd {{ ca_dir }} && export PATH=$PATH:{{bin_dir}} && sh -c"
 
 #专有云业务镜像仓库
-CLOUD_IMAGE_URL: "{{BASE_IMAGE_URL}}"
+CLOUD_IMAGE_URL: "dev-docker-registry.ccyunchina.com"
diff --git a/hosts b/hosts
index 481b6e7..deb2534 100644
--- a/hosts
+++ b/hosts
@@ -3,22 +3,23 @@ localhost
 
 #镜像仓库
 [registry]
-10.2.1.212
+10.2.1.99
 
 #节点配置
 #管理节点
 [kube-master]
-10.2.1.212
-10.2.1.170
-10.2.1.148
+10.2.1.99
+10.2.1.233
+10.2.1.235
 
 #计算节点
 [kube-compute-node]
 
 #存储节点
 [kube-storage-node]
-10.2.1.212
-10.2.1.170
+10.2.1.99
+10.2.1.233
+10.2.1.235
 
 [kube-node:children]
 kube-compute-node
@@ -34,20 +35,16 @@ lb
 #多管理节点时必须配置有代理节点
 #LB_ROLE:master、backup、lb_only,master\backup表示使用keepalived实现haproxy高可用,lb_only表示不使用keepalived
 [lb]
-10.2.1.212
-10.2.1.170
+10.2.1.99
+10.2.1.233
 
 [elasticsearch]
-10.2.1.212
-10.2.1.170
+10.2.1.99
+10.2.1.233
 
 [mysql]
-10.2.1.148
-10.2.1.170
-
-#openvpn服务,请另外创建一台虚拟机(4c8g)作为openvpn服务器,该虚拟机可用于暴露私有云内部网络,openvpnExternalIp表示服务器的外部IP,openvpnRouteCIDR表示需求经过vpn路由的ip范围
-[openvpn]
-10.2.1.224 openvpnExternalIp=10.2.1.224 openvpnRouteCIDR=10.2.1.0/16 ansible_ssh_user=root ansible_ssh_pass=Greatwall@123
+10.2.1.235
+10.2.1.233
 
 # 预留组,后续添加node节点使用
 [new-node]
@@ -60,7 +57,7 @@ ansible_python_interpreter=/usr/bin/python3
 MASTER_IP="10.2.1.164"
 
 #gfs磁盘设备名
-gfs_device=/dev/sdb
+gfs_device=/dev/sda
 
 zstackServerIP=10.1.1.9
 zstackServerAdminUser=admin
diff --git a/manifests/calico.yaml b/manifests/calico.yaml
new file mode 100644
index 0000000..77d7f0c
--- /dev/null
+++ b/manifests/calico.yaml
@@ -0,0 +1,455 @@
+# Calico Version v3.3.7
+# https://docs.projectcalico.org/v3.3/releases#v3.3.7
+# This manifest includes the following component versions:
+#   calico/node:v3.3.7
+#   calico/cni:v3.3.7
+#   calico/kube-controllers:v3.3.7
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+rules:
+  - apiGroups:
+      - ""
+      - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - nodes
+    verbs:
+      - watch
+      - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-kube-controllers
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+  name: calico-kube-controllers
+  namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-node
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+    verbs:
+      - get
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-node
+subjects:
+- kind: ServiceAccount
+  name: calico-node
+  namespace: kube-system
+
+
+---
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # Configure this with the location of your etcd cluster.
+  etcd_endpoints: "https://10.2.1.99:2379,https://10.2.1.233:2379,https://10.2.1.235:2379"
+
+  # If you're using TLS enabled etcd uncomment the following.
+  # You must also populate the Secret below with these files.
+  etcd_ca: "/calico-secrets/ca.pem"
+  etcd_cert: "/calico-secrets/calico.pem"
+  etcd_key: "/calico-secrets/calico-key.pem"
+  # Configure the Calico backend to use.
+  calico_backend: "bird"
+
+  # Configure the MTU to use
+  veth_mtu: "1440"
+
+  # The CNI network configuration to install on each node.  The special
+  # values in this config will be automatically populated.
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.0", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "etcd_endpoints": "https://10.2.1.99:2379,https://10.2.1.233:2379,https://10.2.1.235:2379", + "etcd_key_file": "/etc/calico/ssl/calico-key.pem", + "etcd_cert_file": "/etc/calico/ssl/calico.pem", + "etcd_ca_cert_file": "/etc/calico/ssl/ca.pem", + "mtu": 1440, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "/root/.kube/config" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + +--- + + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: dev-docker-registry.ccyunchina.com/calico/node:v3.3.1 + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "cross-subnet" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. 
Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + - name: CALICO_IPV4POOL_CIDR + value: "172.20.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 50m + memory: 100Mi + limits: + cpu: 500m + memory: 800Mi + livenessProbe: + httpGet: + path: /liveness + port: 9099 + host: localhost + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -bird-ready + - -felix-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: dev-docker-registry.ccyunchina.com/calico/cni:v3.3.1 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/k8s/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + hostPath: + path: /etc/calico/ssl +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- + +# This manifest deploys the Calico Kubernetes controllers. +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. 
+ selector: + matchLabels: + k8s-app: calico-kube-controllers + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + containers: + - name: calico-kube-controllers + image: dev-docker-registry.ccyunchina.com/calico/kube-controllers:v3.3.1 + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,namespace,serviceaccount,workloadendpoint,node + resources: + requests: + cpu: 100m + memory: 40Mi + limits: + cpu: 150m + memory: 100Mi + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + hostPath: + path: /etc/calico/ssl + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system \ No newline at end of file diff --git a/manifests/coredns.yaml b/manifests/coredns.yaml new file mode 100644 index 0000000..e25aa20 --- /dev/null +++ b/manifests/coredns.yaml @@ -0,0 +1,180 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . 
/etc/resolv.conf + cache 30 + reload + loadbalance + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + replicas: 2 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + spec: + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + nodeSelector: + beta.kubernetes.io/os: linux + containers: + - name: coredns + image: dev-docker-registry.ccyunchina.com/toyangdon/coredns:v1.8.0-arm64 + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.68.0.2 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP \ No newline at end of file diff --git a/manifests/es-indices-cleanup.yaml b/manifests/es-indices-cleanup.yaml new file mode 100644 index 0000000..d56b262 --- /dev/null +++ b/manifests/es-indices-cleanup.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: es-index-cleaner + namespace: kube-system +spec: + # 每天1点3分执行 + schedule: "3 1 */1 * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: es-index-cleaner + image: dev-docker-registry.ccyunchina.com/toyangdon/es-index-cleaner:v0.1 + restartPolicy: OnFailure + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 diff --git a/manifests/es-statefulset.yaml b/manifests/es-statefulset.yaml new file mode 100644 index 0000000..3467c12 --- /dev/null +++ b/manifests/es-statefulset.yaml @@ -0,0 +1,268 @@ +# RBAC authn and authz +apiVersion: v1 +kind: ServiceAccount +metadata: + name: elasticsearch-logging + namespace: kube-system + labels: + k8s-app: elasticsearch-logging + addonmanager.kubernetes.io/mode: Reconcile +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: elasticsearch-logging + labels: + k8s-app: elasticsearch-logging + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - "" + resources: + - "services" + - "namespaces" + - "endpoints" + verbs: + - "get" +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: 
elasticsearch-logging + labels: + k8s-app: elasticsearch-logging + addonmanager.kubernetes.io/mode: Reconcile +subjects: +- kind: ServiceAccount + name: elasticsearch-logging + namespace: kube-system + apiGroup: "" +roleRef: + kind: ClusterRole + name: elasticsearch-logging + apiGroup: "" +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: es-config + namespace: kube-system +data: + elasticsearch.yml: | + path.data: /data/es/data + path.logs: /data/es/logs + #http.host: 0.0.0.0 + http.cors.enabled: true + http.cors.allow-origin: "*" + network.host: 0.0.0.0 + node.name: ${HOSTNAME}.elasticsearch + cluster.initial_master_nodes: ["elasticsearch-0.elasticsearch","elasticsearch-1.elasticsearch"] + cluster.name: elasticsearch + discovery.zen.ping.unicast.hosts: ["elasticsearch-0.elasticsearch","elasticsearch-1.elasticsearch"] + discovery.zen.minimum_master_nodes: 2 + discovery.zen.ping_timeout: 5s + +--- +--- +# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: elasticsearch + namespace: kube-system + labels: + k8s-app: elasticsearch + addonmanager.kubernetes.io/mode: Reconcile +spec: + serviceName: elasticsearch + replicas: 2 + selector: + matchLabels: + k8s-app: elasticsearch + template: + metadata: + labels: + k8s-app: elasticsearch + kubernetes.io/cluster-service: "true" + spec: +# hostNetwork: true + tolerations: + - operator: "Exists" + effect: "NoSchedule" + key: + nodeSelector: + "node.kubernetes.io/elasticsearch": "true" + affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: node-role.kubernetes.io/es +# operator: Exists + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - elasticsearch + topologyKey: kubernetes.io/hostname + containers: + - image: "dev-docker-registry.ccyunchina.com/toyangdon/elasticsearch-oss:7.10.2" + name: elasticsearch + resources: + # need more cpu upon initialization, therefore burstable class + limits: + cpu: 4 + memory: "6Gi" + requests: + cpu: 2 + memory: 3Gi + ports: + - containerPort: 9200 + name: db + protocol: TCP + - containerPort: 9300 + name: transport + protocol: TCP + livenessProbe: + tcpSocket: + port: transport + initialDelaySeconds: 30 + timeoutSeconds: 10 + readinessProbe: + tcpSocket: + port: transport + initialDelaySeconds: 30 + timeoutSeconds: 10 + volumeMounts: + - name: es-config + mountPath: /usr/share/elasticsearch/config/elasticsearch.yml + subPath: elasticsearch.yml + - name: es-persistent-storage + mountPath: /data/es + env: + - name: "NAMESPACE" + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "ES_JAVA_OPTS" + value: "-Xms3g -Xmx3g" + securityContext: + privileged: true + volumes: + - name: es-config + configMap: + name: es-config + items: + - key: elasticsearch.yml + path: elasticsearch.yml + initContainers: + - image: dev-docker-registry.ccyunchina.com/library/alpine:3.6 + command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"] + name: elasticsearch-init + securityContext: + privileged: true + volumeClaimTemplates: + - metadata: + name: es-persistent-storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: local-storage + selector: + matchLabels: + pv: elasticsearch +--- +kind: Service +apiVersion: v1 +metadata: + labels: + elastic-app: 
elasticsearch-service + name: elasticsearch-service + namespace: kube-system +spec: + ports: + - port: 9200 + name: db + targetPort: 9200 + - port: 9300 + name: transport + targetPort: 9300 + selector: + k8s-app: elasticsearch +--- +kind: Service +apiVersion: v1 +metadata: + labels: + elastic-app: elasticsearch-service + name: elasticsearch + namespace: kube-system +spec: + ports: + - port: 9200 + name: db + targetPort: 9200 + - port: 9300 + name: transport + targetPort: 9300 + selector: + k8s-app: elasticsearch + clusterIP: None + +--- +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: es-persistent-storage-node-99 + labels: + pv: elasticsearch +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 100Gi + local: + path: /data/es + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "node-99" + persistentVolumeReclaimPolicy: Retain + storageClassName: local-storage +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: es-persistent-storage-node-233 + labels: + pv: elasticsearch +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 100Gi + local: + path: /data/es + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - "node-233" + persistentVolumeReclaimPolicy: Retain + storageClassName: local-storage diff --git a/manifests/fluentd.yaml b/manifests/fluentd.yaml new file mode 100644 index 0000000..5e26719 --- /dev/null +++ b/manifests/fluentd.yaml @@ -0,0 +1,481 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fluentd-es + labels: + k8s-app: fluentd-es + addonmanager.kubernetes.io/mode: Reconcile +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-es + labels: + k8s-app: fluentd-es + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - "" + resources: + - "namespaces" + - "pods" + verbs: + - "get" + - "watch" + - "list" +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: fluentd-es + labels: + k8s-app: fluentd-es + addonmanager.kubernetes.io/mode: Reconcile +subjects: +- kind: ServiceAccount + name: fluentd-es + namespace: kube-system + apiGroup: "" +roleRef: + kind: ClusterRole + name: fluentd-es + apiGroup: "" +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-es-v3.1.1 + labels: + k8s-app: fluentd-es + version: v3.1.1 + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: fluentd-es + version: v3.1.1 + template: + metadata: + labels: + k8s-app: fluentd-es + version: v3.1.1 + spec: + priorityClassName: system-node-critical + serviceAccountName: fluentd-es + containers: + - name: fluentd-es + image: dev-docker-registry.ccyunchina.com/toyangdon/fluentd-es-image-arm:1.12.0 + env: + - name: FLUENTD_ARGS + value: --no-supervisor -q + resources: + limits: + memory: 500Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: datalog + mountPath: /data/log + - name: varlibdockercontainers + mountPath: /data/docker/containers + readOnly: true + - name: config-volume + mountPath: /etc/fluent/config.d + ports: + - containerPort: 24231 + name: prometheus + protocol: TCP + livenessProbe: + tcpSocket: + port: prometheus + initialDelaySeconds: 5 + timeoutSeconds: 10 + readinessProbe: + tcpSocket: + port: prometheus + initialDelaySeconds: 5 + timeoutSeconds: 10 + terminationGracePeriodSeconds: 30 + 
volumes: + - name: varlog + hostPath: + path: /var/log + - name: datalog + hostPath: + path: /data/log + - name: varlibdockercontainers + hostPath: + path: /data/docker/containers + - name: config-volume + configMap: + name: fluentd-es-config-v0.2.1 +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: fluentd-es-config-v0.2.1 + labels: + addonmanager.kubernetes.io/mode: Reconcile +data: + system.conf: |- + + root_dir /tmp/fluentd-buffers/ + + containers.input.conf: |- + # This configuration file for Fluentd / td-agent is used + # to watch changes to Docker log files. The kubelet creates symlinks that + # capture the pod name, namespace, container name & Docker container ID + # to the docker logs for pods in the /var/log/containers directory on the host. + # If running this fluentd configuration in a Docker container, the /var/log + # directory should be mounted in the container. + # + # These logs are then submitted to Elasticsearch which assumes the + # installation of the fluent-plugin-elasticsearch & the + # fluent-plugin-kubernetes_metadata_filter plugins. + # See https://github.com/uken/fluent-plugin-elasticsearch & + # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for + # more information about the plugins. + # + # Example + # ======= + # A line in the Docker log file might look like this JSON: + # + # {"log":"2014/09/25 21:15:03 Got request with path wombat\n", + # "stream":"stderr", + # "time":"2014-09-25T21:15:03.499185026Z"} + # + # The time_format specification below makes sure we properly + # parse the time format produced by Docker. This will be + # submitted to Elasticsearch and should appear like: + # $ curl 'http://elasticsearch-logging:9200/_search?pretty' + # ... + # { + # "_index" : "logstash-2014.09.25", + # "_type" : "fluentd", + # "_id" : "VBrbor2QTuGpsQyTCdfzqA", + # "_score" : 1.0, + # "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n", + # "stream":"stderr","tag":"docker.container.all", + # "@timestamp":"2014-09-25T22:45:50+00:00"} + # }, + # ... + # + # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log + # record & add labels to the log record if properly configured. This enables users + # to filter & search logs on any metadata. + # For example a Docker container's logs might be in the directory: + # + # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b + # + # and in the file: + # + # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log + # + # where 997599971ee6... is the Docker ID of the running container. 
+ # The Kubernetes kubelet makes a symbolic link to this file on the host machine + # in the /var/log/containers directory which includes the pod name and the Kubernetes + # container name: + # + # synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # -> + # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log + # + # The /var/log directory on the host is mapped to the /var/log directory in the container + # running this instance of Fluentd and we end up collecting the file: + # + # /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # + # This results in the tag: + # + # var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # + # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name + # which are added to the log message as a kubernetes field object & the Docker container ID + # is also added under the docker field object. + # The final tag is: + # + # kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log + # + # And the final log record look like: + # + # { + # "log":"2014/09/25 21:15:03 Got request with path wombat\n", + # "stream":"stderr", + # "time":"2014-09-25T21:15:03.499185026Z", + # "kubernetes": { + # "namespace": "default", + # "pod_name": "synthetic-logger-0.25lps-pod", + # "container_name": "synth-lgr" + # }, + # "docker": { + # "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b" + # } + # } + # + # This makes it easier for users to search for logs by pod name or by + # the name of the Kubernetes container regardless of how many times the + # Kubernetes pod has been restarted (resulting in a several Docker container IDs). + # Json Log Example: + # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"} + # CRI Log Example: + # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here + + @id fluentd-containers.log + @type tail + path /var/log/containers/*.log + pos_file /var/log/es-containers.log.pos + tag raw.kubernetes.* + read_from_head true + + @type multi_format + + format json + time_key time + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + format /^(? + + + # Detect exceptions in the log output and forward them as one log entry. + + @id raw.kubernetes + @type detect_exceptions + remove_tag_prefix raw + message log + stream stream + multiline_flush_interval 5 + max_bytes 500000 + max_lines 1000 + + # Concatenate multi-line logs + + @id filter_concat + @type concat + key message + multiline_end_regexp /\n$/ + separator "" + + # Enriches records with Kubernetes metadata + + @id filter_kubernetes_metadata + @type kubernetes_metadata + + # Fixes json fields in Elasticsearch + + @id filter_parser + @type parser + key_name log + reserve_data true + remove_key_name_field true + + @type multi_format + + format json + + + format none + + + + system.input.conf: |- + # Example: + # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 + + @id minion + @type tail + format /^(?