This commit is contained in:
ydoit 2024-05-28 13:48:39 +08:00
parent 4555d88bb1
commit 0cc7f09ed6
94 changed files with 0 additions and 54575 deletions

View File

@ -1,6 +0,0 @@
elasticsearch_image: toyangdon/elasticsearch-oss:7.10.2
alpine_image: library/alpine:3.6
elasticsearch_indices_cleanup_image: toyangdon/es-index-cleaner:v0.1
es_data_path: >-
{{data_dir}}/es

View File

@ -1,20 +0,0 @@
- name: Create the es-statefulset.yaml file
template: src=es-statefulset.yaml dest={{ base_dir }}/manifests/es-statefulset.yaml
delegate_to: localhost
run_once: true
when: not ansible_check_mode
- name: Deploy es-statefulset
shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/es-statefulset.yaml"
delegate_to: localhost
run_once: true
- name: Create the es-indices-cleanup.yaml file
template: src=es-indices-cleanup.yaml dest={{ base_dir }}/manifests/es-indices-cleanup.yaml
delegate_to: localhost
run_once: true
- name: Deploy the es-index-clean job
shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/es-indices-cleanup.yaml"
delegate_to: localhost
run_once: true

View File

@ -1,29 +0,0 @@
- name: Create the data directory
file: name={{es_data_path}}/data state=directory mode=0755 owner=1000 group=1000 recurse=yes
- name: Create the log directory
file: name={{es_data_path}}/logs state=directory mode=0755 owner=1000 group=1000 recurse=yes
#- name: Modify system limits (vm.max_map_count)
# sysctl:
# name: vm.max_map_count
# value: 655360
# sysctl_set: yes
# state: present
# reload: yes
- include_tasks: container.yml
#- name: Wait for the elasticsearch statefulset to start successfully
# shell: "{{bin_dir}}/kubectl get statefulset elasticsearch -n kube-system -o jsonpath='{.status.readyReplicas}'"
# register: status_result
# until: 'groups["elasticsearch"]|length|string == status_result.stdout'
# retries: 50
# delay: 5
# run_once: true
#- name: Reboot the es node; deploying elasticsearch currently hits a node network issue that a reboot resolves
# shell: shutdown -r
#- name: Wait for the node to reconnect
# wait_for_connection: delay=60

View File

@ -1,18 +0,0 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: es-index-cleaner
namespace: kube-system
spec:
# Runs at 01:03 every day
schedule: "3 1 */1 * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: es-index-cleaner
image: {{BASE_IMAGE_URL}}/{{elasticsearch_indices_cleanup_image}}
restartPolicy: OnFailure
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3

View File

@ -1,248 +0,0 @@
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: elasticsearch-logging
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch-logging
apiGroup: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
name: es-config
namespace: kube-system
data:
elasticsearch.yml: |
path.data: {{es_data_path}}/data
path.logs: {{es_data_path}}/logs
#http.host: 0.0.0.0
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 0.0.0.0
node.name: ${HOSTNAME}.elasticsearch
cluster.initial_master_nodes: [{%- for node in groups['elasticsearch'] -%}"elasticsearch-{{loop.index - 1}}.elasticsearch"{% if not loop.last %},{% endif %}{%- endfor -%}]
cluster.name: elasticsearch
discovery.zen.ping.unicast.hosts: [{%- for node in groups['elasticsearch'] -%}"elasticsearch-{{loop.index - 1}}.elasticsearch"{% if not loop.last %},{% endif %}{%- endfor -%}]
discovery.zen.minimum_master_nodes: {{(groups['elasticsearch']|length) // 2 + 1}}
discovery.zen.ping_timeout: 5s
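# Rendered example (a sketch assuming a hypothetical 3-node "elasticsearch" inventory group):
#   cluster.initial_master_nodes: ["elasticsearch-0.elasticsearch","elasticsearch-1.elasticsearch","elasticsearch-2.elasticsearch"]
#   discovery.zen.ping.unicast.hosts: ["elasticsearch-0.elasticsearch","elasticsearch-1.elasticsearch","elasticsearch-2.elasticsearch"]
#   discovery.zen.minimum_master_nodes: 2   # (3 // 2) + 1, a majority quorum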
---
# https://raw.githubusercontent.com/kubernetes/kubernetes/v1.10.2/cluster/addons/fluentd-elasticsearch/es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch
namespace: kube-system
labels:
k8s-app: elasticsearch
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: elasticsearch
replicas: {{groups['elasticsearch']|length}}
selector:
matchLabels:
k8s-app: elasticsearch
template:
metadata:
labels:
k8s-app: elasticsearch
kubernetes.io/cluster-service: "true"
spec:
# hostNetwork: true
tolerations:
{% for taint in node_taints.split(",") %}
- operator: "Exists"
effect: "NoSchedule"
key: {{taint|replace("=:NoSchedule","")}}
{% endfor %}
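# Rendered example (a sketch assuming the hypothetical value node_taints="node-role.kubernetes.io/master=:NoSchedule"):
# - operator: "Exists"
#   effect: "NoSchedule"
#   key: node-role.kubernetes.io/master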
nodeSelector:
"node.kubernetes.io/elasticsearch": "true"
affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-role.kubernetes.io/es
# operator: Exists
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- elasticsearch
topologyKey: kubernetes.io/hostname
containers:
- image: "{{BASE_IMAGE_URL}}/{{ elasticsearch_image }}"
name: elasticsearch
resources:
# need more cpu upon initialization, therefore burstable class
limits:
cpu: 4
memory: "6Gi"
requests:
cpu: 2
memory: 3Gi
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
livenessProbe:
tcpSocket:
port: transport
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
tcpSocket:
port: transport
initialDelaySeconds: 30
timeoutSeconds: 10
volumeMounts:
- name: es-config
mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
subPath: elasticsearch.yml
- name: es-persistent-storage
mountPath: /data/es
env:
- name: "NAMESPACE"
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: "ES_JAVA_OPTS"
value: "-Xms3g -Xmx3g"
securityContext:
privileged: true
volumes:
- name: es-config
configMap:
name: es-config
items:
- key: elasticsearch.yml
path: elasticsearch.yml
initContainers:
- image: {{BASE_IMAGE_URL}}/{{ alpine_image }}
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-init
securityContext:
privileged: true
volumeClaimTemplates:
- metadata:
name: es-persistent-storage
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
storageClassName: local-storage
selector:
matchLabels:
pv: elasticsearch
---
kind: Service
apiVersion: v1
metadata:
labels:
elastic-app: elasticsearch-service
name: elasticsearch-service
namespace: kube-system
spec:
ports:
- port: 9200
name: db
targetPort: 9200
- port: 9300
name: transport
targetPort: 9300
selector:
k8s-app: elasticsearch
---
kind: Service
apiVersion: v1
metadata:
labels:
elastic-app: elasticsearch-service
name: elasticsearch
namespace: kube-system
spec:
ports:
- port: 9200
name: db
targetPort: 9200
- port: 9300
name: transport
targetPort: 9300
selector:
k8s-app: elasticsearch
clusterIP: None
---
{% for node in groups['elasticsearch'] %}
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: es-persistent-storage-{{hostvars[node].NODE_ID}}
labels:
pv: elasticsearch
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 100Gi
local:
path: {{es_data_path}}
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- "{{hostvars[node].NODE_ID}}"
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
{% endfor %}

View File

@ -1 +0,0 @@
fluentd_image: toyangdon/fluentd-es-image-arm:1.12.0

View File

@ -1,7 +0,0 @@
- name: Create the fluentd.yaml file
template: src=fluentd.yaml dest={{ base_dir }}/manifests/fluentd.yaml
delegate_to: localhost
- name: Deploy fluentd.yaml
shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/fluentd.yaml -n kube-system"
delegate_to: localhost

View File

@ -1 +0,0 @@
- include_tasks: container.yml

View File

@ -1,481 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "namespaces"
- "pods"
verbs:
- "get"
- "watch"
- "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: fluentd-es
labels:
k8s-app: fluentd-es
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: fluentd-es
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: fluentd-es
apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v3.1.1
labels:
k8s-app: fluentd-es
version: v3.1.1
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v3.1.1
template:
metadata:
labels:
k8s-app: fluentd-es
version: v3.1.1
spec:
priorityClassName: system-node-critical
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: {{BASE_IMAGE_URL}}/{{fluentd_image}}
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: datalog
mountPath: /data/log
- name: varlibdockercontainers
mountPath: /data/docker/containers
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
ports:
- containerPort: 24231
name: prometheus
protocol: TCP
livenessProbe:
tcpSocket:
port: prometheus
initialDelaySeconds: 5
timeoutSeconds: 10
readinessProbe:
tcpSocket:
port: prometheus
initialDelaySeconds: 5
timeoutSeconds: 10
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: datalog
hostPath:
path: /data/log
- name: varlibdockercontainers
hostPath:
path: /data/docker/containers
- name: config-volume
configMap:
name: fluentd-es-config-v0.2.1
---
kind: ConfigMap
apiVersion: v1
metadata:
name: fluentd-es-config-v0.2.1
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
containers.input.conf: |-
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files. The kubelet creates symlinks that
# capture the pod name, namespace, container name & Docker container ID
# to the docker logs for pods in the /var/log/containers directory on the host.
# If running this fluentd configuration in a Docker container, the /var/log
# directory should be mounted in the container.
#
# These logs are then submitted to Elasticsearch which assumes the
# installation of the fluent-plugin-elasticsearch & the
# fluent-plugin-kubernetes_metadata_filter plugins.
# See https://github.com/uken/fluent-plugin-elasticsearch &
# https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
# more information about the plugins.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
# ...
# {
# "_index" : "logstash-2014.09.25",
# "_type" : "fluentd",
# "_id" : "VBrbor2QTuGpsQyTCdfzqA",
# "_score" : 1.0,
# "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
# "stream":"stderr","tag":"docker.container.all",
# "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...
#
# The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
# record & add labels to the log record if properly configured. This enables users
# to filter & search logs on any metadata.
# For example a Docker container's logs might be in the directory:
#
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
#
# and in the file:
#
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
#
# synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
#
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
#
# /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# This results in the tag:
#
# var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
# which are added to the log message as a kubernetes field object & the Docker container ID
# is also added under the docker field object.
# The final tag is:
#
# kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
#
# And the final log record look like:
#
# {
# "log":"2014/09/25 21:15:03 Got request with path wombat\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z",
# "kubernetes": {
# "namespace": "default",
# "pod_name": "synthetic-logger-0.25lps-pod",
# "container_name": "synth-lgr"
# },
# "docker": {
# "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
# }
# }
#
# This makes it easier for users to search for logs by pod name or by
# the name of the Kubernetes container regardless of how many times the
# Kubernetes pod has been restarted (resulting in a several Docker container IDs).
# Json Log Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
# CRI Log Example:
# 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
<source>
@id fluentd-containers.log
@type tail
path /var/log/containers/*.log
pos_file /var/log/es-containers.log.pos
tag raw.kubernetes.*
read_from_head true
<parse>
@type multi_format
<pattern>
format json
time_key time
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
time_format %Y-%m-%dT%H:%M:%S.%N%:z
</pattern>
</parse>
</source>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@id raw.kubernetes
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
# Concatenate multi-line logs
<filter **>
@id filter_concat
@type concat
key message
multiline_end_regexp /\n$/
separator ""
</filter>
# Enriches records with Kubernetes metadata
<filter kubernetes.**>
@id filter_kubernetes_metadata
@type kubernetes_metadata
</filter>
# Fixes json fields in Elasticsearch
<filter kubernetes.**>
@id filter_parser
@type parser
key_name log
reserve_data true
remove_key_name_field true
<parse>
@type multi_format
<pattern>
format json
</pattern>
<pattern>
format none
</pattern>
</parse>
</filter>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
@id minion
@type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
@id startupscript.log
@type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/es-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
# TODO(random-liu): Remove this after cri container runtime rolls out.
<source>
@id docker.log
@type tail
format /^time="(?<time>[^"]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/es-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
@id etcd.log
@type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /data/log/etcd/etcd.log
pos_file /data/log/etcd/es-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@id kubelet.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /data/log/kubelet/kubelet.log
pos_file /data/log/kubelet/es-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@id glbc.log
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/es-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
@id journald-docker
@type systemd
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-docker.pos
</storage>
read_from_head true
tag docker
</source>
<source>
@id journald-container-runtime
@type systemd
matches [{ "_SYSTEMD_UNIT": "docker.service" }]
<storage>
@type local
persistent true
path /var/log/journald-container-runtime.pos
</storage>
read_from_head true
tag container-runtime
</source>
<source>
@id journald-kubelet
@type systemd
matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
<storage>
@type local
persistent true
path /var/log/journald-kubelet.pos
</storage>
read_from_head true
tag kubelet
</source>
<source>
@id kernel
@type systemd
matches [{ "_TRANSPORT": "kernel" }]
<storage>
@type local
persistent true
path /var/log/kernel.pos
</storage>
<entry>
fields_strip_underscores true
fields_lowercase true
</entry>
read_from_head true
tag kernel
</source>
forward.input.conf: |-
# Takes the messages sent over TCP
<source>
@id forward
@type forward
</source>
monitoring.conf: |-
# Prometheus Exporter Plugin
# input plugin that exports metrics
<source>
@id prometheus
@type prometheus
</source>
<source>
@id monitor_agent
@type monitor_agent
</source>
# input plugin that collects metrics from MonitorAgent
<source>
@id prometheus_monitor
@type prometheus_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for output plugin
<source>
@id prometheus_output_monitor
@type prometheus_output_monitor
<labels>
host ${hostname}
</labels>
</source>
# input plugin that collects metrics for in_tail plugin
<source>
@id prometheus_tail_monitor
@type prometheus_tail_monitor
<labels>
host ${hostname}
</labels>
</source>
output.conf: |-
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
type_name _doc
include_tag_key true
host elasticsearch-service
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
total_limit_size 500M
overflow_action block
</buffer>
</match>

View File

@ -1 +0,0 @@
kibana_image: toyangdon/kibana-arm:7.10.2

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,25 +0,0 @@
- name: Create the kibana.yaml file
template: src=kibana.yaml dest={{ base_dir }}/manifests/kibana.yaml
ignore_errors: "{{ ansible_check_mode }}"
- name: Deploy kibana
shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/kibana.yaml -n kube-system"
ignore_errors: "{{ ansible_check_mode }}"
#- name: Import the kibana dashboards.json file; if it times out, run it manually
# shell: "curl -XPOST 'http://{{MASTER_IP}}:5601/api/kibana/dashboards/import?force=true' -H 'Content-Type: application/json' -H 'kbn-xsrf: reporting' -d @{{ base_dir }}/roles/elk/kibana/files/dashboards.json -m 10 -o /dev/null -s -w %{http_code}"
# register: kibana_result
# until: '"200" == kibana_result.stdout'
# retries: 60
# delay: 15
# ignore_errors: true
# when: not ansible_check_mode
#- name: Import the kibana filebeat.json file; if it times out, run it manually
# shell: "curl -XPOST 'http://{{MASTER_IP}}:5601/api/kibana/dashboards/import?force=true' -H 'Content-Type: application/json' -H 'kbn-xsrf: reporting' -d @{{ base_dir }}/roles/elk/kibana/files/filebeat.json -m 10 -o /dev/null -s -w %{http_code}"
# register: kibana_result1
# until: '"200" == kibana_result1.stdout'
# retries: 60
# delay: 15
# ignore_errors: true
# when: not ansible_check_mode

View File

@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
labels:
k8s-app: kibana-logging
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
matchLabels:
k8s-app: kibana-logging
template:
metadata:
labels:
k8s-app: kibana-logging
spec:
containers:
- name: kibana-logging
image: "{{BASE_IMAGE_URL}}/{{ kibana_image }}"
resources:
# need more cpu upon initialization, therefore burstable class
limits:
cpu: 1000m
requests:
cpu: 100m
env:
- name: ELASTICSEARCH_HOSTS
value: http://elasticsearch-service:9200
- name: SERVER_NAME
value: kibana-logging
ports:
- containerPort: 5601
name: ui
protocol: TCP
livenessProbe:
httpGet:
path: /api/status
port: ui
initialDelaySeconds: 5
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /api/status
port: ui
initialDelaySeconds: 5
timeoutSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
name: kibana-logging
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "Kibana"
spec:
ports:
- port: 5601
protocol: TCP
targetPort: ui
nodePort: 25601
selector:
k8s-app: kibana-logging
type: NodePort

View File

@ -1,14 +0,0 @@
# Configuration files for ansible installation and deployment
This project is one of the container-cloud deployment components. When the deployment files are packaged, the component's deployment configuration files are taken from this directory.
## Notes
The layout of this directory matches the ansible roles directory structure; all files are in YAML format and the syntax is based on Jinja2. Developers should generally be able to follow these scripts, so when a component's deployment configuration changes, they are expected to update the relevant configuration scripts in this directory themselves.
1. default directory
Holds the variables used by the component's configuration template files
2. template directory
Holds the component configuration templates. An occurrence of "{{var_x}}" in a template is a reference to the variable var_x (see the sketch after this list)
3. task directory
Holds the scripts that drive the component's deployment steps
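As a minimal sketch of that variable mechanism, using the es_data_path variable that the elasticsearch role in this repository already defines (the data_dir value shown is only illustrative):

```yaml
# defaults file of the elasticsearch role — declares the variable
es_data_path: >-
  {{data_dir}}/es

# template es-statefulset.yaml — the elasticsearch.yml ConfigMap references it:
# path.data: {{es_data_path}}/data

# Assuming data_dir resolves to /data, the rendered manifest line would read:
# path.data: /data/es/data
```

Ansible resolves these variables when the template tasks run, so the manifests that kubectl applies contain only rendered values.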
## Deployment verification
After the component passes testing, developers should update the configuration files and then run a syntax check once through Jenkins.
To be completed.

View File

@ -1,10 +0,0 @@
prometheus_image: prom/prometheus:v2.11.0
alertmanager_image: prom/alertmanager:v0.18.0
grafana_image: grafana/grafana:6.6.2
kube_rbac_proxy_image: toyangdon/kube-rbac-proxy:v0.4.1
kube_state_metrics_image: toyangdon/kube-state-metrics-arm64:v1.9.0
node_exporter_image: prom/node-exporter:v0.18.1
prometheus_adapter_image: toyangdon/k8s-prometheus-adapter:v0.5.0
config_reloader_image: toyangdon/configmap-reload-arm64:v0.1
prometheus_operator_image: toyangdon/prometheus-operator:v0.34.0
prometheus_config_reloader_image: toyangdon/prometheus-config-reloader:v0.34.0

View File

@ -1,8 +0,0 @@
apiVersion: v1
data:
alertmanager.yaml: Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg==
kind: Secret
metadata:
name: alertmanager-main
namespace: monitoring
type: Opaque

View File

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
alertmanager: main
name: alertmanager-main
namespace: monitoring
spec:
ports:
- name: web
nodePort: 22093
port: 9093
targetPort: web
selector:
alertmanager: main
app: alertmanager
sessionAffinity: ClientIP
type: NodePort

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: alertmanager-main
namespace: monitoring

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: alertmanager
name: alertmanager
namespace: monitoring
spec:
endpoints:
- interval: 30s
port: web
selector:
matchLabels:
alertmanager: main

View File

@ -1,8 +0,0 @@
apiVersion: v1
data:
datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0=
kind: Secret
metadata:
name: grafana-datasources
namespace: monitoring
type: Opaque

View File

@ -1,21 +0,0 @@
apiVersion: v1
data:
dashboards.yaml: |-
{
"apiVersion": 1,
"providers": [
{
"folder": "",
"name": "0",
"options": {
"path": "/grafana-dashboard-definitions/0"
},
"orgId": 1,
"type": "file"
}
]
}
kind: ConfigMap
metadata:
name: grafana-dashboards
namespace: monitoring

View File

@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: grafana
name: grafana
namespace: monitoring
spec:
ports:
- name: http
nodePort: 23000
port: 3000
targetPort: http
selector:
app: grafana
type: NodePort

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: grafana
namespace: monitoring

View File

@ -1,12 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: grafana
namespace: monitoring
spec:
endpoints:
- interval: 15s
port: http
selector:
matchLabels:
app: grafana

View File

@ -1,91 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-state-metrics
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
- nodes
- pods
- services
- resourcequotas
- replicationcontrollers
- limitranges
- persistentvolumeclaims
- persistentvolumes
- namespaces
- endpoints
verbs:
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- replicasets
- ingresses
verbs:
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
- daemonsets
- deployments
- replicasets
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- list
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- list
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- list
- watch

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: monitoring

View File

@ -1,30 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: kube-state-metrics
namespace: monitoring
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- extensions
resourceNames:
- kube-state-metrics
resources:
- deployments
verbs:
- get
- update
- apiGroups:
- apps
resourceNames:
- kube-state-metrics
resources:
- deployments
verbs:
- get
- update

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kube-state-metrics
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics

View File

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: kube-state-metrics
name: kube-state-metrics
namespace: monitoring
spec:
clusterIP: None
ports:
- name: https-main
port: 8443
targetPort: https-main
- name: https-self
port: 9443
targetPort: https-self
selector:
app: kube-state-metrics

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-state-metrics
namespace: monitoring

View File

@ -1,30 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: kube-state-metrics
name: kube-state-metrics
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 30s
port: https-main
relabelings:
- action: labeldrop
regex: (pod|service|endpoint|namespace)
scheme: https
scrapeTimeout: 30s
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 30s
port: https-self
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: k8s-app
selector:
matchLabels:
k8s-app: kube-state-metrics

View File

@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-exporter
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-exporter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: node-exporter
subjects:
- kind: ServiceAccount
name: node-exporter
namespace: monitoring

View File

@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: node-exporter
name: node-exporter
namespace: monitoring
spec:
clusterIP: None
ports:
- name: https
port: 9100
targetPort: https
selector:
app: node-exporter

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-exporter
namespace: monitoring

View File

@ -1,26 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: node-exporter
name: node-exporter
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 30s
port: https
relabelings:
- action: replace
regex: (.*)
replacement: $1
sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: instance
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: k8s-app
selector:
matchLabels:
k8s-app: node-exporter

View File

@ -1,13 +0,0 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: prometheus-adapter
namespace: monitoring
version: v1beta1
versionPriority: 100

View File

@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-adapter
rules:
- apiGroups:
- ""
resources:
- nodes
- namespaces
- pods
- services
verbs:
- get
- list
- watch

View File

@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
verbs:
- get
- list
- watch

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus-adapter
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-adapter
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: monitoring

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: resource-metrics:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: monitoring

View File

@ -1,11 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: resource-metrics-server-resources
rules:
- apiGroups:
- metrics.k8s.io
resources:
- '*'
verbs:
- '*'

View File

@ -1,33 +0,0 @@
apiVersion: v1
data:
config.yaml: |
resourceRules:
cpu:
containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)
nodeQuery: sum(1 - rate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
resources:
overrides:
node:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
memory:
containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)
nodeQuery: sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)
resources:
overrides:
instance:
resource: node
namespace:
resource: namespace
pod:
resource: pod
containerLabel: container
window: 5m
kind: ConfigMap
metadata:
name: adapter-config
namespace: monitoring

View File

@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: resource-metrics-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: prometheus-adapter
namespace: monitoring

View File

@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
name: prometheus-adapter
name: prometheus-adapter
namespace: monitoring
spec:
ports:
- name: https
nodePort: 22443
port: 443
targetPort: 6443
selector:
name: prometheus-adapter
type: NodePort

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-adapter
namespace: monitoring

View File

@ -1,32 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus-k8s
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- persistentvolumeclaims
- persistentvolumeclaims/status
- pods
- replicationcontrollers
- replicationcontrollers/scale
- serviceaccounts
- services
- services/status
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/metrics
verbs:
- get
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus-k8s
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring

View File

@ -1,18 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
namespace: monitoring
spec:
endpoints:
- honorLabels: true
port: http
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0

View File

@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s-config
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s-config
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring

View File

@ -1,42 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring
kind: RoleBindingList

View File

@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s-config
namespace: monitoring
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get

View File

@ -1,51 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s
namespace: default
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s
namespace: monitoring
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
kind: RoleList

File diff suppressed because it is too large

View File

@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
prometheus: k8s
name: prometheus-k8s
namespace: monitoring
spec:
ports:
- name: web
nodePort: 22090
port: 9090
targetPort: web
selector:
app: prometheus
prometheus: k8s
sessionAffinity: ClientIP
type: NodePort

View File

@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus-k8s
namespace: monitoring

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: prometheus
name: prometheus
namespace: monitoring
spec:
endpoints:
- interval: 30s
port: web
selector:
matchLabels:
prometheus: k8s

View File

@ -1,37 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: apiserver
name: kube-apiserver
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 30s
metricRelabelings:
- action: drop
regex: etcd_(debugging|disk|request|server).*
sourceLabels:
- __name__
- action: drop
regex: apiserver_admission_controller_admission_latencies_seconds_.*
sourceLabels:
- __name__
- action: drop
regex: apiserver_admission_step_admission_latencies_seconds_.*
sourceLabels:
- __name__
port: https
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
jobLabel: component
namespaceSelector:
matchNames:
- default
selector:
matchLabels:
component: apiserver
provider: kubernetes

View File

@ -1,19 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: coredns
name: coredns
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 15s
port: metrics
jobLabel: k8s-app
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
k8s-app: kube-dns

View File

@ -1,23 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: kube-controller-manager
name: kube-controller-manager
namespace: monitoring
spec:
endpoints:
- interval: 30s
metricRelabelings:
- action: drop
regex: etcd_(debugging|disk|request|server).*
sourceLabels:
- __name__
port: http-metrics
jobLabel: k8s-app
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
k8s-app: kube-controller-manager

View File

@ -1,18 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: kube-scheduler
name: kube-scheduler
namespace: monitoring
spec:
endpoints:
- interval: 30s
port: http-metrics
jobLabel: k8s-app
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
k8s-app: kube-scheduler

View File

@ -1,44 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: kubelet
name: kubelet
namespace: monitoring
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 30s
port: https-metrics
relabelings:
- sourceLabels:
- __metrics_path__
targetLabel: metrics_path
scheme: https
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 30s
metricRelabelings:
- action: drop
regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
sourceLabels:
- __name__
path: /metrics/cadvisor
port: https-metrics
relabelings:
- sourceLabels:
- __metrics_path__
targetLabel: metrics_path
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: k8s-app
namespaceSelector:
matchNames:
- kube-system
selector:
matchLabels:
k8s-app: kubelet

View File

@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring

View File

@ -1,239 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: podmonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: PodMonitor
plural: podmonitors
scope: Namespaced
validation:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: PodMonitorSpec contains specification parameters for a PodMonitor.
properties:
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: NamespaceSelector is a selector for selecting either all
namespaces or a list of namespaces.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podMetricsEndpoints:
description: A list of endpoints allowed as part of this PodMonitor.
items:
description: PodMetricsEndpoint defines a scrapeable endpoint of a
Kubernetes Pod serving Prometheus metrics.
properties:
honorLabels:
description: HonorLabels chooses the metric's labels on collisions
with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects
the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
params:
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the port this endpoint refers to. Mutually
exclusive with targetPort.
type: string
proxyUrl:
description: ProxyURL eg http://proxyserver:2195 Directs scrapes
to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before ingestion.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
description: Regular expression against which the extracted
value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: string
- type: integer
type: object
type: array
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod
onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
selector:
description: A label selector is a label query over a set of resources.
The result of matchLabels and matchExpressions are ANDed. An empty
label selector matches all objects. A null label selector matches
no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
required:
- podMetricsEndpoints
- selector
type: object
type: object
version: v1

View File

@ -1,250 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: prometheusrules.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: PrometheusRule
plural: prometheusrules
scope: Namespaced
validation:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
description: ObjectMeta is metadata that all persisted resources must have,
which includes all objects users must create.
properties:
annotations:
description: 'Annotations is an unstructured key value map stored with
a resource that may be set by external tools to store and retrieve
arbitrary metadata. They are not queryable and should be preserved
when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations'
type: object
clusterName:
description: The name of the cluster which the object belongs to. This
is used to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: string
creationTimestamp:
description: Time is a wrapper around time.Time which supports correct
marshaling to YAML and JSON. Wrappers are provided for many of the
factory methods that the time package offers.
format: date-time
type: string
deletionGracePeriodSeconds:
description: Number of seconds allowed for this object to gracefully
terminate before it will be removed from the system. Only set when
deletionTimestamp is also set. May only be shortened. Read-only.
format: int64
type: integer
deletionTimestamp:
description: Time is a wrapper around time.Time which supports correct
marshaling to YAML and JSON. Wrappers are provided for many of the
factory methods that the time package offers.
format: date-time
type: string
finalizers:
description: Must be empty before the object is deleted from the registry.
Each entry is an identifier for the responsible component that will
remove the entry from the list. If the deletionTimestamp of the object
is non-nil, entries in this list can only be removed.
items:
type: string
type: array
generateName:
description: |-
GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.
If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).
Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
type: string
generation:
description: A sequence number representing a specific generation of
the desired state. Populated by the system. Read-only.
format: int64
type: integer
labels:
description: 'Map of string keys and values that can be used to organize
and categorize (scope and select) objects. May match selectors of
replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels'
type: object
managedFields:
description: ManagedFields maps workflow-id and version to the set of
fields that are managed by that workflow. This is mostly for internal
housekeeping, and users typically shouldn't need to set or understand
this field. A workflow can be the user's name, a controller's name,
or the name of a specific apply path like "ci-cd". The set of fields
is always in the version that the workflow used when modifying the
object.
items:
description: ManagedFieldsEntry is a workflow-id, a FieldSet and the
group version of the resource that the fieldset applies to.
properties:
apiVersion:
description: APIVersion defines the version of this resource that
this field set applies to. The format is "group/version" just
like the top-level APIVersion field. It is necessary to track
the version of a field set because it cannot be automatically
converted.
type: string
fieldsType:
description: 'FieldsType is the discriminator for the different
fields format and version. There is currently only one possible
value: "FieldsV1"'
type: string
fieldsV1:
description: |-
FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
Each key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.
The exact format is defined in sigs.k8s.io/structured-merge-diff
type: object
manager:
description: Manager is an identifier of the workflow managing
these fields.
type: string
operation:
description: Operation is the type of operation which lead to
this ManagedFieldsEntry being created. The only valid values
for this field are 'Apply' and 'Update'.
type: string
time:
description: Time is a wrapper around time.Time which supports
correct marshaling to YAML and JSON. Wrappers are provided
for many of the factory methods that the time package offers.
format: date-time
type: string
type: object
type: array
name:
description: 'Name must be unique within a namespace. Is required when
creating resources, although some resources may allow a client to
request the generation of an appropriate name automatically. Name
is primarily intended for creation idempotence and configuration definition.
Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
namespace:
description: |-
Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.
Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces
type: string
ownerReferences:
description: List of objects depended by this object. If ALL objects
in the list have been deleted, this object will be garbage collected.
If this object is managed by a controller, then an entry in this list
will point to this controller, with the controller field set to true.
There cannot be more than one managing controller.
items:
description: OwnerReference contains enough information to let you
identify an owning object. An owning object must be in the same
namespace as the dependent, or be cluster-scoped, so there is no
namespace field.
properties:
apiVersion:
description: API version of the referent.
type: string
blockOwnerDeletion:
description: If true, AND if the owner has the "foregroundDeletion"
finalizer, then the owner cannot be deleted from the key-value
store until this reference is removed. Defaults to false. To
set this field, a user needs "delete" permission of the owner,
otherwise 422 (Unprocessable Entity) will be returned.
type: boolean
controller:
description: If true, this reference points to the managing controller.
type: boolean
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names'
type: string
uid:
description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids'
type: string
required:
- apiVersion
- kind
- name
- uid
type: object
type: array
resourceVersion:
description: |-
An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.
        Populated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
type: string
selfLink:
description: |-
SelfLink is a URL representing this object. Populated by the system. Read-only.
DEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.
type: string
uid:
description: |-
UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.
Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
type: string
type: object
spec:
description: PrometheusRuleSpec contains specification parameters for a
Rule.
properties:
groups:
description: Content of Prometheus rule file
items:
description: RuleGroup is a list of sequentially evaluated recording
and alerting rules.
properties:
interval:
type: string
name:
type: string
rules:
items:
description: Rule describes an alerting or recording rule.
properties:
alert:
type: string
annotations:
type: object
expr:
anyOf:
- type: string
- type: integer
for:
type: string
labels:
type: object
record:
type: string
required:
- expr
type: object
type: array
required:
- name
- rules
type: object
type: array
type: object
type: object
version: v1

View File

@ -1,346 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: servicemonitors.monitoring.coreos.com
spec:
group: monitoring.coreos.com
names:
kind: ServiceMonitor
plural: servicemonitors
scope: Namespaced
validation:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
spec:
description: ServiceMonitorSpec contains specification parameters for a
ServiceMonitor.
properties:
endpoints:
description: A list of endpoints allowed as part of this ServiceMonitor.
items:
description: Endpoint defines a scrapeable endpoint serving Prometheus
metrics.
properties:
basicAuth:
description: 'BasicAuth allow an endpoint to authenticate over
basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
properties:
password:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
username:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
type: object
bearerTokenFile:
description: File to read bearer token for scraping targets.
type: string
bearerTokenSecret:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
honorLabels:
description: HonorLabels chooses the metric's labels on collisions
with target labels.
type: boolean
honorTimestamps:
description: HonorTimestamps controls whether Prometheus respects
the timestamps present in scraped data.
type: boolean
interval:
description: Interval at which metrics should be scraped
type: string
metricRelabelings:
description: MetricRelabelConfigs to apply to samples before ingestion.
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
                    description: Regular expression against which the extracted
                      value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
params:
description: Optional HTTP URL parameters
type: object
path:
description: HTTP path to scrape for metrics.
type: string
port:
description: Name of the service port this endpoint refers to.
Mutually exclusive with targetPort.
type: string
proxyUrl:
                  description: ProxyURL (e.g. http://proxyserver:2195) directs scrapes
                    to proxy through this endpoint.
type: string
relabelings:
description: 'RelabelConfigs to apply to samples before scraping.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config'
items:
description: 'RelabelConfig allows dynamic rewriting of the
label set, being applied to samples before ingestion. It defines
`<metric_relabel_configs>`-section of Prometheus configuration.
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
properties:
action:
description: Action to perform based on regex matching.
Default is 'replace'
type: string
modulus:
description: Modulus to take of the hash of the source label
values.
format: int64
type: integer
regex:
                    description: Regular expression against which the extracted
                      value is matched. Default is '(.*)'
type: string
replacement:
description: Replacement value against which a regex replace
is performed if the regular expression matches. Regex
capture groups are available. Default is '$1'
type: string
separator:
description: Separator placed between concatenated source
label values. default is ';'.
type: string
sourceLabels:
description: The source labels select values from existing
labels. Their content is concatenated using the configured
separator and matched against the configured regular expression
for the replace, keep, and drop actions.
items:
type: string
type: array
targetLabel:
description: Label to which the resulting value is written
in a replace action. It is mandatory for replace actions.
Regex capture groups are available.
type: string
type: object
type: array
scheme:
description: HTTP scheme to use for scraping.
type: string
scrapeTimeout:
description: Timeout after which the scrape is ended
type: string
targetPort:
anyOf:
- type: string
- type: integer
tlsConfig:
description: TLSConfig specifies TLS configuration parameters.
properties:
ca: {}
caFile:
description: Path to the CA cert in the Prometheus container
to use for the targets.
type: string
cert: {}
certFile:
description: Path to the client cert file in the Prometheus
container for the targets.
type: string
insecureSkipVerify:
description: Disable target certificate validation.
type: boolean
keyFile:
description: Path to the client key file in the Prometheus
container for the targets.
type: string
keySecret:
description: SecretKeySelector selects a key of a Secret.
properties:
key:
description: The key of the secret to select from. Must
be a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
optional:
description: Specify whether the Secret or its key must
be defined
type: boolean
required:
- key
type: object
serverName:
description: Used to verify the hostname for the targets.
type: string
type: object
type: object
type: array
jobLabel:
description: The label to use to retrieve the job name from.
type: string
namespaceSelector:
description: NamespaceSelector is a selector for selecting either all
namespaces or a list of namespaces.
properties:
any:
description: Boolean describing whether all namespaces are selected
in contrast to a list restricting them.
type: boolean
matchNames:
description: List of namespace names.
items:
type: string
type: array
type: object
podTargetLabels:
description: PodTargetLabels transfers labels on the Kubernetes Pod
onto the target.
items:
type: string
type: array
sampleLimit:
description: SampleLimit defines per-scrape limit on number of scraped
samples that will be accepted.
format: int64
type: integer
selector:
description: A label selector is a label query over a set of resources.
The result of matchLabels and matchExpressions are ANDed. An empty
label selector matches all objects. A null label selector matches
no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: operator represents a key's relationship to a
set of values. Valid operators are In, NotIn, Exists and
DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator
is In or NotIn, the values array must be non-empty. If the
operator is Exists or DoesNotExist, the values array must
be empty. This array is replaced during a strategic merge
patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
description: matchLabels is a map of {key,value} pairs. A single
{key,value} in the matchLabels map is equivalent to an element
of matchExpressions, whose key field is "key", the operator is
"In", and the values array contains only "value". The requirements
are ANDed.
type: object
type: object
targetLabels:
description: TargetLabels transfers labels on the Kubernetes Service
onto the target.
items:
type: string
type: array
required:
- endpoints
- selector
type: object
type: object
version: v1

View File

@ -1,73 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- alertmanagers
- prometheuses
- prometheuses/finalizers
- alertmanagers/finalizers
- servicemonitors
- podmonitors
- prometheusrules
verbs:
- '*'
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- '*'
- apiGroups:
- ""
resources:
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- delete
- apiGroups:
- ""
resources:
- services
- services/finalizers
- endpoints
verbs:
- get
- create
- update
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch

View File

@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus-operator
subjects:
- kind: ServiceAccount
name: prometheus-operator
namespace: monitoring

View File

@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
namespace: monitoring
spec:
clusterIP: None
ports:
- name: http
port: 8080
targetPort: http
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator

View File

@ -1,9 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
namespace: monitoring

View File

@ -1,14 +0,0 @@
# Ansible deployment configuration files
This project is one of the container cloud deployment components. When the deployment files are packaged, the component's deployment configuration is taken from this directory.
## Notes
The layout of this directory matches the Ansible roles directory structure. All files are YAML and the syntax is based on Jinja2. Developers should generally be able to follow what the scripts do, so whenever a component's deployment configuration changes, developers are expected to update the relevant configuration scripts in this directory themselves.
1. default directory
Holds the variables referenced by the component configuration templates
2. template directory
Holds the component configuration templates. A "{{var_x}}" expression in a template refers to the variable var_x (see the sketch after this list)
3. task directory
Holds the scripts that drive the component deployment
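A minimal sketch of how a variable flows from the default directory into a rendered manifest; the variable name my_component_image, its value, and the file names below are hypothetical and only illustrate the "{{var_x}}" substitution described above.
```yaml
# default/main.yml (hypothetical) defines the variable, e.g.:
#   my_component_image: library/nginx:1.21
#
# template/my-component.yaml (hypothetical) references it, e.g.:
#   image: "{{my_component_image}}"
#
# task/main.yml then renders the template and applies the result:
- name: Create the my-component.yaml file
  template: src=my-component.yaml dest={{ base_dir }}/manifests/my-component.yaml
- name: Deploy my-component
  shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/my-component.yaml"
```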
## Deployment verification
After a component passes testing, developers should update the configuration files and then run a syntax validation once via Jenkins.
To be added

View File

@ -1,37 +0,0 @@
- name: prepare some dirs
file: name={{base_dir}}/manifests/prometheus/setup state=directory
- name: Create component template files
template: src={{ item }} dest={{ base_dir }}/manifests/prometheus/{{ item | basename | regex_replace('\.j2','') }}
with_fileglob:
- "{{base_dir}}/roles/gpaas/monitor/prometheus/templates/*"
- name: Create CRD template files
template: src={{ item }} dest={{ base_dir }}/manifests/prometheus/setup/{{ item | basename | regex_replace('\.j2','') }}
with_fileglob:
- "{{base_dir}}/roles/gpaas/monitor/prometheus/templates/setup/*"
- name: Copy non-template CRD files
copy: src={{ item }} dest={{ base_dir }}/manifests/prometheus/setup/{{ item | basename }}
with_fileglob:
- "{{base_dir}}/roles/gpaas/monitor/prometheus/files/setup/*"
- name: Copy non-template deployment files
copy: src={{ item }} dest={{ base_dir }}/manifests/prometheus/{{ item | basename }}
with_fileglob:
- "{{base_dir}}/roles/gpaas/monitor/prometheus/files/*"
- name: Deploy CRDs
shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/prometheus/setup"
- name: Deploy components
shell: "{{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/prometheus/"
register: result
until: '"no matches for kind" not in result.stdout'
retries: 5
delay: 5
ignore_errors: true
- name: Retry the component deployment to guard against transient errors
shell: "sleep 5 && {{ bin_dir }}/kubectl apply -f {{ base_dir }}/manifests/prometheus/"

View File

@ -1,14 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
k8s-app: alertmanager
name: alertmanager
namespace: monitoring
spec:
endpoints:
- interval: 30s
port: web
selector:
matchLabels:
alertmanager: main

View File

@ -1,18 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
labels:
alertmanager: main
name: main
namespace: monitoring
spec:
baseImage: {{BASE_IMAGE_URL}}/{{ alertmanager_image.split(':')[0] }}
nodeSelector:
kubernetes.io/os: linux
replicas: 3
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
serviceAccountName: alertmanager-main
version: {{ alertmanager_image.split(':')[1] }}

View File

@ -1,9 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: prometheus-pv
spec:
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce

View File

@ -1,216 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: grafana
name: grafana
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
containers:
- image: {{BASE_IMAGE_URL}}/{{ grafana_image }}
name: grafana
ports:
- containerPort: 3000
name: http
readinessProbe:
httpGet:
path: /api/health
port: http
resources:
limits:
cpu: 200m
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-storage
readOnly: false
- mountPath: /etc/grafana/provisioning/datasources
name: grafana-datasources
readOnly: false
- mountPath: /etc/grafana/provisioning/dashboards
name: grafana-dashboards
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/apiserver
name: grafana-dashboard-apiserver
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/cluster-total
name: grafana-dashboard-cluster-total
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/controller-manager
name: grafana-dashboard-controller-manager
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-cluster
name: grafana-dashboard-k8s-resources-cluster
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-namespace
name: grafana-dashboard-k8s-resources-namespace
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-node
name: grafana-dashboard-k8s-resources-node
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-pod
name: grafana-dashboard-k8s-resources-pod
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-workload
name: grafana-dashboard-k8s-resources-workload
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/k8s-resources-workloads-namespace
name: grafana-dashboard-k8s-resources-workloads-namespace
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/kubelet
name: grafana-dashboard-kubelet
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/namespace-by-pod
name: grafana-dashboard-namespace-by-pod
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/namespace-by-workload
name: grafana-dashboard-namespace-by-workload
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/node-cluster-rsrc-use
name: grafana-dashboard-node-cluster-rsrc-use
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/node-rsrc-use
name: grafana-dashboard-node-rsrc-use
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/nodes
name: grafana-dashboard-nodes
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/persistentvolumesusage
name: grafana-dashboard-persistentvolumesusage
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/pod-total
name: grafana-dashboard-pod-total
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/pods
name: grafana-dashboard-pods
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/prometheus-remote-write
name: grafana-dashboard-prometheus-remote-write
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/prometheus
name: grafana-dashboard-prometheus
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/proxy
name: grafana-dashboard-proxy
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/scheduler
name: grafana-dashboard-scheduler
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/statefulset
name: grafana-dashboard-statefulset
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/workload-total
name: grafana-dashboard-workload-total
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/rocketmq
name: grafana-dashboard-rocketmq
readOnly: false
- mountPath: /grafana-dashboard-definitions/0/mysql
name: grafana-dashboard-mysql
readOnly: false
nodeSelector:
beta.kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: grafana
volumes:
- emptyDir: {}
name: grafana-storage
- name: grafana-datasources
secret:
secretName: grafana-datasources
- configMap:
name: grafana-dashboards
name: grafana-dashboards
- configMap:
name: grafana-dashboard-apiserver
name: grafana-dashboard-apiserver
- configMap:
name: grafana-dashboard-cluster-total
name: grafana-dashboard-cluster-total
- configMap:
name: grafana-dashboard-controller-manager
name: grafana-dashboard-controller-manager
- configMap:
name: grafana-dashboard-k8s-resources-cluster
name: grafana-dashboard-k8s-resources-cluster
- configMap:
name: grafana-dashboard-k8s-resources-namespace
name: grafana-dashboard-k8s-resources-namespace
- configMap:
name: grafana-dashboard-k8s-resources-node
name: grafana-dashboard-k8s-resources-node
- configMap:
name: grafana-dashboard-k8s-resources-pod
name: grafana-dashboard-k8s-resources-pod
- configMap:
name: grafana-dashboard-k8s-resources-workload
name: grafana-dashboard-k8s-resources-workload
- configMap:
name: grafana-dashboard-k8s-resources-workloads-namespace
name: grafana-dashboard-k8s-resources-workloads-namespace
- configMap:
name: grafana-dashboard-kubelet
name: grafana-dashboard-kubelet
- configMap:
name: grafana-dashboard-namespace-by-pod
name: grafana-dashboard-namespace-by-pod
- configMap:
name: grafana-dashboard-namespace-by-workload
name: grafana-dashboard-namespace-by-workload
- configMap:
name: grafana-dashboard-node-cluster-rsrc-use
name: grafana-dashboard-node-cluster-rsrc-use
- configMap:
name: grafana-dashboard-node-rsrc-use
name: grafana-dashboard-node-rsrc-use
- configMap:
name: grafana-dashboard-nodes
name: grafana-dashboard-nodes
- configMap:
name: grafana-dashboard-persistentvolumesusage
name: grafana-dashboard-persistentvolumesusage
- configMap:
name: grafana-dashboard-pod-total
name: grafana-dashboard-pod-total
- configMap:
name: grafana-dashboard-pods
name: grafana-dashboard-pods
- configMap:
name: grafana-dashboard-prometheus-remote-write
name: grafana-dashboard-prometheus-remote-write
- configMap:
name: grafana-dashboard-prometheus
name: grafana-dashboard-prometheus
- configMap:
name: grafana-dashboard-proxy
name: grafana-dashboard-proxy
- configMap:
name: grafana-dashboard-scheduler
name: grafana-dashboard-scheduler
- configMap:
name: grafana-dashboard-statefulset
name: grafana-dashboard-statefulset
- configMap:
name: grafana-dashboard-workload-total
name: grafana-dashboard-workload-total
- configMap:
name: grafana-dashboard-rocketmq
name: grafana-dashboard-rocketmq
- configMap:
name: grafana-dashboard-mysql
name: grafana-dashboard-mysql

View File

@ -1,72 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: kube-state-metrics
name: kube-state-metrics
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: kube-state-metrics
template:
metadata:
labels:
app: kube-state-metrics
spec:
containers:
- args:
- --logtostderr
- --secure-listen-address=:8443
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- --upstream=http://127.0.0.1:8081/
image: {{BASE_IMAGE_URL}}/{{ kube_rbac_proxy_image }}
name: kube-rbac-proxy-main
ports:
- containerPort: 8443
name: https-main
resources:
limits:
cpu: 20m
memory: 40Mi
requests:
cpu: 10m
memory: 20Mi
- args:
- --logtostderr
- --secure-listen-address=:9443
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- --upstream=http://127.0.0.1:8082/
image: {{BASE_IMAGE_URL}}/{{ kube_rbac_proxy_image }}
name: kube-rbac-proxy-self
ports:
- containerPort: 9443
name: https-self
resources:
limits:
cpu: 20m
memory: 40Mi
requests:
cpu: 10m
memory: 20Mi
- args:
- --host=127.0.0.1
- --port=8081
- --telemetry-host=127.0.0.1
- --telemetry-port=8082
image: {{BASE_IMAGE_URL}}/{{ kube_state_metrics_image }}
name: kube-state-metrics
resources:
limits:
cpu: 100m
memory: 150Mi
requests:
cpu: 100m
memory: 150Mi
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: kube-state-metrics

View File

@ -1,87 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: node-exporter
name: node-exporter
namespace: monitoring
spec:
selector:
matchLabels:
app: node-exporter
template:
metadata:
labels:
app: node-exporter
spec:
containers:
- args:
- --web.listen-address=127.0.0.1:9100
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
image: {{BASE_IMAGE_URL}}/{{ node_exporter_image }}
name: node-exporter
resources:
limits:
cpu: 250m
memory: 180Mi
requests:
cpu: 102m
memory: 180Mi
volumeMounts:
- mountPath: /host/proc
name: proc
readOnly: false
- mountPath: /host/sys
name: sys
readOnly: false
- mountPath: /host/root
mountPropagation: HostToContainer
name: root
readOnly: true
- args:
- --logtostderr
- --secure-listen-address=$(IP):9100
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- --upstream=http://127.0.0.1:9100/
env:
- name: IP
valueFrom:
fieldRef:
fieldPath: status.podIP
image: {{BASE_IMAGE_URL}}/{{ kube_rbac_proxy_image }}
name: kube-rbac-proxy
ports:
- containerPort: 9100
hostPort: 9100
name: https
resources:
limits:
cpu: 20m
memory: 40Mi
requests:
cpu: 10m
memory: 20Mi
hostNetwork: true
hostPID: true
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: node-exporter
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
- hostPath:
path: /
name: root

View File

@ -1,52 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus-adapter
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
name: prometheus-adapter
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
name: prometheus-adapter
spec:
containers:
- args:
- --cert-dir=/var/run/serving-cert
- --config=/etc/adapter/config.yaml
- --logtostderr=true
- --metrics-relist-interval=1m
- --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/
- --secure-port=6443
image: {{BASE_IMAGE_URL}}/{{ prometheus_adapter_image }}
name: prometheus-adapter
ports:
- containerPort: 6443
volumeMounts:
- mountPath: /tmp
name: tmpfs
readOnly: false
- mountPath: /var/run/serving-cert
name: volume-serving-cert
readOnly: false
- mountPath: /etc/adapter
name: config
readOnly: false
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: prometheus-adapter
volumes:
- emptyDir: {}
name: tmpfs
- emptyDir: {}
name: volume-serving-cert
- configMap:
name: adapter-config
name: config

View File

@ -1,33 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
labels:
prometheus: k8s
name: k8s
namespace: monitoring
spec:
alerting:
alertmanagers:
- name: alertmanager-main
namespace: monitoring
port: web
baseImage: {{BASE_IMAGE_URL}}/{{ prometheus_image.split(':')[0] }}
nodeSelector:
kubernetes.io/os: linux
podMonitorSelector: {}
replicas: 2
resources:
requests:
memory: 400Mi
ruleSelector:
matchLabels:
prometheus: k8s
role: alert-rules
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
serviceAccountName: prometheus-k8s
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
version: {{prometheus_image.split(':')[1]}}

View File

@ -1,48 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
name: prometheus-operator
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/name: prometheus-operator
app.kubernetes.io/version: v0.34.0
spec:
containers:
- args:
- --kubelet-service=kube-system/kubelet
- --logtostderr=true
- --config-reloader-image={{BASE_IMAGE_URL}}/{{ config_reloader_image }}
- --prometheus-config-reloader={{BASE_IMAGE_URL}}/{{prometheus_config_reloader_image}}
image: {{BASE_IMAGE_URL}}/{{ prometheus_operator_image }}
name: prometheus-operator
ports:
- containerPort: 8080
name: http
resources:
limits:
cpu: 200m
memory: 200Mi
requests:
cpu: 100m
memory: 100Mi
securityContext:
allowPrivilegeEscalation: false
nodeSelector:
beta.kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: prometheus-operator

View File

@ -1,24 +0,0 @@
#!/bin/bash
if [ $# -ne 1 ]; then
echo "Usage: $0 USER_NAME"
echo "-1"
exit -1
fi
if ! [[ $1 =~ ^[a-zA-Z0-9_]{3,16}$ ]];then
echo "请使用字母数字或下划线开头的3到16个字符的名字"
echo "-1"
exit -1
fi
docker="/opt/k8s/bin/docker exec openvpn"
if ! $docker easyrsa show-cert $1 &> /dev/null;then
echo "没有这个用户,无法删除"
echo "-1"
exit -1
fi
$docker ovpn_revokeclient $1
rm -rf /client.certs/$1

View File

@ -1,19 +0,0 @@
[Unit]
After=network.target docker.service
Requires=docker.service
[Service]
RestartSec=10
Restart=always
Environment="NAME=openvpn"
Environment="DATA_VOL=/opt/openvpn/etc"
Environment="IMG=y2ghost/openvpn:latest"
Environment="PORT=1194:1194/tcp"
ExecStartPre=-/opt/k8s/bin/docker rm -f $NAME
ExecStart=/opt/k8s/bin/docker run --rm --cap-add=NET_ADMIN -v ${DATA_VOL}:/etc/openvpn --name ${NAME} -p ${PORT} ${IMG} ovpn_run $ARGS
[Install]
WantedBy=multi-user.target

View File

@ -1,38 +0,0 @@
#!/bin/bash
if [ $# -ne 1 ]; then
echo "Usage: $0 USER_NAME"
echo "-1"
exit -1
fi
if ! [[ $1 =~ ^[a-zA-Z0-9_]{3,16}$ ]];then
echo "请使用字母数字或下划线开头的3到16个字符的名字"
echo "-1"
exit -1
fi
docker="/opt/k8s/bin/docker exec openvpn"
if $docker easyrsa show-cert $1 &> /dev/null;then
echo "此用户已存在,请重新输入"
echo "-1"
exit -1
fi
$docker easyrsa build-client-full $1 nopass
$docker ovpn_getclient $1 separated
if ! [ -d /client.certs/ ]; then
mkdir /client.certs/
fi
if [ -d /client.certs/$1 ]; then
rm -rf /client.certs/$1
mkdir /client.certs/$1
else
mkdir /client.certs/$1
fi
cp -af /opt/openvpn/etc/clients/$1 /client.certs/
cd /client.certs/$1
rm -f "$1"-combined.ovpn
mv "$1".ovpn client.ovpn

View File

@ -1,63 +0,0 @@
#!/bin/bash
WORK_DIR="$PWD"
IMAGE="y2ghost/openvpn"
CONTAINER_NAME="openvpn"
SRV_NAME="docker-openvpn.service"
HOST_OVPN_CONF_DIR="/opt/openvpn/etc"
MOUNT_OVPN_CONF_DIR="${HOST_OVPN_CONF_DIR}:/etc/openvpn"
usage() {
echo "用法: $0 <VPN Server IP> <router>"
echo "示例: $0 10.2.1.224 10.2.0.0/16"
}
OVPN_SERVER_HOST="$1"
if [ -z "${OVPN_SERVER_HOST:-}" ]; then
usage
exit 1
fi
OVPN_ROUTER="$2"
if [ -z "${OVPN_ROUTER:-}" ]; then
usage
exit 1
fi
# Try to import the openvpn environment variable file from the current directory
VARS_ENV=""
LOCAL_VARS_FILE="${WORK_DIR}/ovpn_vars"
OVPN_VARS_FILE="${HOST_OVPN_CONF_DIR}/vars"
[ -r "$LOCAL_VARS_FILE" ] && {
if ! source "$LOCAL_VARS_FILE"; then
echo "无法导入$LOCAL_VARS_FILE文件中的变量,请确认变量格式是否正常!"
exit 1
fi
VARS_ENV="-e EASYRSA_VARS_FILE=/etc/openvpn/vars"
}
echo "开始安装openvpn docker服务"
echo "尝试卸载已安装的openvpn docker服务"
systemctl disable --now ${SRV_NAME}  # 'stop --now' is rejected by systemctl; 'disable --now' stops and disables the old unit
echo "生成新的服务配置数据"
rm -rf ${HOST_OVPN_CONF_DIR}
mkdir -p ${HOST_OVPN_CONF_DIR}
cp ${LOCAL_VARS_FILE} ${OVPN_VARS_FILE}
docker=/opt/k8s/bin/docker
$docker load -i ${WORK_DIR}/openvpn-docker-image.tar
$docker run -v ${MOUNT_OVPN_CONF_DIR} --rm ${IMAGE} ovpn_genconfig -N -u tcp://${OVPN_SERVER_HOST} -r ${OVPN_ROUTER}
echo "初始化PKI数据"
$docker run ${VARS_ENV} -v ${MOUNT_OVPN_CONF_DIR} --rm -it ${IMAGE} ovpn_initpki
echo "安装和启动服务"
cp -af ${WORK_DIR}/${SRV_NAME} /etc/systemd/system/${SRV_NAME}
systemctl enable --now ${SRV_NAME}
install ${WORK_DIR}/genovpnuser /usr/local/bin/genovpnuser
install ${WORK_DIR}/delovpnuser /usr/local/bin/delovpnuser
install ${WORK_DIR}/output.sh /usr/local/bin/output.sh
cp -af ${WORK_DIR}/stream.txt /usr/local/bin/stream.txt

View File

@ -1,22 +0,0 @@
#!/bin/bash
if [ $# -ne 1 ]; then
echo "Usage: $0 USER_NAME"
echo "-1"
exit -1
fi
if ! [[ $1 =~ ^[a-zA-Z0-9_]{3,16}$ ]];then
echo "请使用字母数字或下划线开头的3到16个字符的名字"
echo "-1"
exit -1
fi
cd /client.certs
cp /usr/local/bin/stream.txt tempstream.txt
sed -i "/^<config>/r $1/client.ovpn" tempstream.txt
sed -i "/^<ca>/r $1/ca.crt" tempstream.txt
sed -i "/^<crt>/r $1/${1}.crt" tempstream.txt
sed -i "/^<key>/r $1/${1}.key" tempstream.txt
sed -i "/^<ta>/r $1/ta.key" tempstream.txt
cat tempstream.txt
rm -rf tempstream.txt

View File

@ -1,8 +0,0 @@
# The defaults are fine as-is
EASYRSA_REQ_COUNTRY="CN"
EASYRSA_REQ_PROVINCE="HuNan"
EASYRSA_REQ_CITY="ChangSha"
EASYRSA_REQ_ORG="Greatwall Corporation"
EASYRSA_REQ_EMAIL="yuliangliang@greatwall.com.cn"
EASYRSA_REQ_OU="Product Department"
EASYRSA_REQ_CN="Private Cloud OpenVPN"

View File

@ -1,10 +0,0 @@
<config>
</config>
<ca>
</ca>
<crt>
</crt>
<key>
</key>
<ta>
</ta>

View File

@ -1,19 +0,0 @@
- name: Create directory
file: name=/data/openvpn state=directory
- name: Copy the openvpn image file
copy: src={{ base_dir }}/bin/openvpn/openvpn-docker-image.tar dest=/data/openvpn/ mode=0755
- name: Copy the installation script files
copy: src={{ item }} dest=/data/openvpn/{{ item }} mode=0755
with_items:
- delovpnuser
- docker-openvpn.service
- genovpnuser
- install.sh
- output.sh
- stream.txt
- ovpn_vars
- name: Run the installation script
shell: "cd /data/openvpn && sh install.sh {{openvpnExternalIp}} {{openvpnRouteCIDR}}"

View File

@ -1,5 +0,0 @@
- name: Create the skywalking.yaml file
template: src=skywalking/skywalking.yaml dest={{ base_dir }}/manifests/proprietary-cloud/skywalking.yaml
- name: Deploy skywalking.yaml
shell: "{{bin_dir}}/kubectl apply -f {{ base_dir }}/manifests/proprietary-cloud/skywalking.yaml -n kube-public"