Installing a Highly Available Kubernetes Cluster from Binary Packages

1. Download the installation packages
Download Kubernetes:
Click the CHANGELOG, then choose the Server and Node binary packages.
Download the tarballs of the version shown below.

Download etcd 3.5:
Download the version shown below.

2. Install Docker 20.10.7
Refer to the official documentation. After installation, check the version.
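The original defers to the official documentation; as a minimal sketch for CentOS, assuming the official Docker CE yum repo and a version pin of 20.10.7:

yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io
systemctl enable --now docker
docker version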

3. Set up the hosts configuration. On the following three servers, configure /etc/hosts as shown:
[root@k8s01 back]# more /etc/hosts
172.16.10.138 k8s01
172.16.10.137 k8s02
172.16.10.139 k8s03
4. Clean up any version previously installed with kubeadm
#!/bin/bash
kubeadm reset -f
modprobe -r ipip
lsmod
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
yum clean all
yum remove kube*

5. Install and configure etcd
On all three machines (k8s01, k8s02, k8s03):
Copy the three executables etcd, etcdctl, and etcdutl to /usr/bin:
cp etcd* /usr/bin
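If the binaries are not yet on the machines, the release tarball can be fetched and unpacked first (a sketch, assuming v3.5.0, the version that appears in the shell prompts below):

wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
tar xzvf etcd-v3.5.0-linux-amd64.tar.gz
cd etcd-v3.5.0-linux-amd64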
Create the etcd systemd service unit file at /usr/lib/systemd/system/etcd.service:
[root@k8s01 etcd-v3.5.0-linux-amd64]# cat /usr/lib/systemd/system/etcd.service
# /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd key-value store
Documentation=https://github.com/etcd-io/etcd

[Service]
EnvironmentFile=/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=always

[Install]
WantedBy=multi-user.target
Create the CA certificate
The -nodes option is not the English word "nodes" but "no DES": when given as an argument, it means OpenSSL will not encrypt the private key.

rm -rf /etc/kubernetes/pki/
mkdir -p /etc/kubernetes/pki/
cd /etc/kubernetes/pki/
openssl genrsa -out ca.key 2048
openssl req -x509 -new -nodes -key ca.key -subj "/CN=172.16.10.138" -days 36500 -out ca.crt
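A quick sanity check of the generated CA (not in the original; plain openssl):

openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -subject -dates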

Create the certificate config file and the certificates
The certificate config file looks like this:
[root@k8s01 back]# cat /etc/etcd/etcd_ssl.cnf
# etcd_ssl.cnf
[ req ]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[ req_distinguished_name ]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[ alt_names ]
IP.1  = 172.16.10.138
IP.2  = 172.16.10.137
IP.3  = 172.16.10.139
DNS.1 = k8s01
DNS.2 = k8s02
DNS.3 = k8s03
# end etcd_ssl.cnf

mkdir -p /etc/etcd/pki/
cd /etc/etcd/pki/
openssl genrsa -out etcd_server.key 2048
openssl req -new -key etcd_server.key -config /etc/etcd/etcd_ssl.cnf -subj "/CN=etcd-server" -out etcd_server.csr
openssl x509 -req -in etcd_server.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile /etc/etcd/etcd_ssl.cnf -out etcd_server.crt

openssl genrsa -out etcd_client.key 2048
openssl req -new -key etcd_client.key -config /etc/etcd/etcd_ssl.cnf -subj "/CN=etcd-client" -out etcd_client.csr
openssl x509 -req -in etcd_client.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile /etc/etcd/etcd_ssl.cnf -out etcd_client.crt
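It is worth confirming that the SANs from etcd_ssl.cnf actually made it into the signed certificate (a quick check, not in the original):

openssl x509 -in etcd_server.crt -noout -text | grep -A1 "Subject Alternative Name"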

Create the etcd data directory:
mkdir -p /etc/etcd/data
Create the configuration file on k8s01 (the node 2 and node 3 variants follow):
[root@k8s01 pki]# cat /etc/etcd/etcd.conf
# /etc/etcd/etcd.conf - node 1
ETCD_NAME=etcd1
ETCD_DATA_DIR=/etc/etcd/data
ETCD_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_CLIENT_CERT_AUTH=true
ETCD_LISTEN_CLIENT_URLS=https://172.16.10.138:2379
ETCD_ADVERTISE_CLIENT_URLS=https://172.16.10.138:2379
ETCD_PEER_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_PEER_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_PEER_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_LISTEN_PEER_URLS=https://172.16.10.138:2380
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://172.16.10.138:2380
ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
ETCD_INITIAL_CLUSTER="etcd1=https://172.16.10.138:2380,etcd2=https://172.16.10.137:2380,etcd3=https://172.16.10.139:2380"
ETCD_INITIAL_CLUSTER_STATE=new
--------------------------------------------------
# /etc/etcd/etcd.conf - node 2
ETCD_NAME=etcd2
ETCD_DATA_DIR=/etc/etcd/data
ETCD_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_CLIENT_CERT_AUTH=true
ETCD_LISTEN_CLIENT_URLS=https://172.16.10.137:2379
ETCD_ADVERTISE_CLIENT_URLS=https://172.16.10.137:2379
ETCD_PEER_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_PEER_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_PEER_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_LISTEN_PEER_URLS=https://172.16.10.137:2380
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://172.16.10.137:2380
ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
ETCD_INITIAL_CLUSTER="etcd1=https://172.16.10.138:2380,etcd2=https://172.16.10.137:2380,etcd3=https://172.16.10.139:2380"
ETCD_INITIAL_CLUSTER_STATE=new
--------------------------------------------------
# /etc/etcd/etcd.conf - node 3
ETCD_NAME=etcd3
ETCD_DATA_DIR=/etc/etcd/data
ETCD_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_CLIENT_CERT_AUTH=true
ETCD_LISTEN_CLIENT_URLS=https://172.16.10.139:2379
ETCD_ADVERTISE_CLIENT_URLS=https://172.16.10.139:2379
ETCD_PEER_CERT_FILE=/etc/etcd/pki/etcd_server.crt
ETCD_PEER_KEY_FILE=/etc/etcd/pki/etcd_server.key
ETCD_PEER_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.crt
ETCD_LISTEN_PEER_URLS=https://172.16.10.139:2380
ETCD_INITIAL_ADVERTISE_PEER_URLS=https://172.16.10.139:2380
ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
ETCD_INITIAL_CLUSTER="etcd1=https://172.16.10.138:2380,etcd2=https://172.16.10.137:2380,etcd3=https://172.16.10.139:2380"
ETCD_INITIAL_CLUSTER_STATE=new
--------------------------------------------------
Start the etcd service on every node:
systemctl restart etcd && systemctl enable etcd
[root@dellR710 pki]# etcdctl --cacert=/etc/kubernetes/pki/ca.crt --cert=/etc/etcd/pki/etcd_client.crt --key=/etc/etcd/pki/etcd_client.key --endpoints=https://172.16.10.138:2379,https://172.16.10.137:2379,https://172.16.10.139:2379 endpoint health
https://172.16.10.138:2379 is healthy: successfully committed proposal: took = 24.030101ms
https://172.16.10.137:2379 is healthy: successfully committed proposal: took = 33.012128ms
https://172.16.10.139:2379 is healthy: successfully committed proposal: took = 47.740017ms

[root@dellR710 pki]# etcdctl --cacert=/etc/kubernetes/pki/ca.crt --cert=/etc/etcd/pki/etcd_client.crt --key=/etc/etcd/pki/etcd_client.key --endpoints=https://172.16.10.138:2379 member list
54da171dcec87549, started, etcd2, https://172.16.10.137:2380, https://172.16.10.137:2379, false
bd4e35496584eba1, started, etcd3, https://172.16.10.139:2380, https://172.16.10.139:2379, false
d21f7997f8ebad04, started, etcd1, https://172.16.10.138:2380, https://172.16.10.138:2379, false
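Beyond the health check, a quick read/write smoke test confirms the cluster actually accepts writes (a sketch, reusing the same client-certificate flags; reading back through a different endpoint also demonstrates replication):

etcdctl --cacert=/etc/kubernetes/pki/ca.crt --cert=/etc/etcd/pki/etcd_client.crt --key=/etc/etcd/pki/etcd_client.key --endpoints=https://172.16.10.138:2379 put foo bar
etcdctl --cacert=/etc/kubernetes/pki/ca.crt --cert=/etc/etcd/pki/etcd_client.crt --key=/etc/etcd/pki/etcd_client.key --endpoints=https://172.16.10.137:2379 get foo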

6. Configure Kubernetes
Configure the kube-apiserver.
The certificate request config file:
[root@k8s01 kubernetes]# cat master_ssl.cnf
# master_ssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
DNS.5 = k8s01
DNS.6 = k8s02
DNS.7 = k8s03
IP.1 = 169.169.0.1
IP.2 = 172.16.10.138
IP.3 = 172.16.10.137
IP.4 = 172.16.10.139
IP.5 = 172.16.10.100
IP.6 = 127.0.0.1
IP.7 = 192.168.0.1
# end master_ssl.cnf
# Create the server certificate
cd /etc/kubernetes/pki
rm -rf apiserver.*
rm -rf client.*
openssl genrsa -out apiserver.key 2048
openssl req -new -key apiserver.key -config /etc/kubernetes/master_ssl.cnf -subj "/CN=172.16.10.138" -out apiserver.csr
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 36500 -extensions v3_req -extfile /etc/kubernetes/master_ssl.cnf -out apiserver.crt

# Create the client certificate
openssl genrsa -out client.key 2048
openssl req -new -key client.key -subj "/CN=admin" -out client.csr
openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt -days 36500

Create the service
-----------------------------------
# Configuration of each Kubernetes service
# vi /usr/lib/systemd/system/kube-apiserver.service
# /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://kubernetes.io/docs/

[Service]
EnvironmentFile=/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=always

[Install]
WantedBy=multi-user.target
-----------------------------------
The service's configuration file. Note that --service-account-signing-key-file and --service-account-issuer are mandatory from v1.20 on:
#vi /etc/kubernetes/apiserver
# /etc/kubernetes/apiserver
KUBE_API_ARGS="--secure-port=6443 \
--tls-cert-file=/etc/kubernetes/pki/apiserver.crt \
--tls-private-key-file=/etc/kubernetes/pki/apiserver.key \
--client-ca-file=/etc/kubernetes/pki/ca.crt \
--service-account-key-file=/etc/kubernetes/pki/ca.key \
--service-account-signing-key-file=/etc/kubernetes/pki/ca.key \
--service-account-issuer=https://kubernetes.default.svc.cluster.local \
--apiserver-count=3 --endpoint-reconciler-type=master-count \
--etcd-servers=https://172.16.10.138:2379,https://172.16.10.137:2379,https://172.16.10.139:2379 \
--etcd-cafile=/etc/kubernetes/pki/ca.crt \
--etcd-certfile=/etc/etcd/pki/etcd_client.crt \
--etcd-keyfile=/etc/etcd/pki/etcd_client.key \
--service-cluster-ip-range=169.169.0.0/16 \
--service-node-port-range=30000-32767 \
--allow-privileged=true \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0"

Start the service:
systemctl start kube-apiserver && systemctl enable kube-apiserver
systemctl status kube-apiserver
journalctl -u kube-apiserver

Test the api-server; getting a response means the configuration is correct:
[root@k8s01 bin]# curl --insecure https://172.16.10.138:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}

Problems encountered
---------- problem ----------
journalctl -u kube-apiserver

Aug 12 11:36:01 k8s01 kube-apiserver[565796]: Flag --insecure-port has been deprecated, This flag has no effect now and will be removed in v>
Aug 12 11:36:01 k8s01 kube-apiserver[565796]: Error: [service-account-issuer is a required flag, --service-account-signing-key-file and --service-account-issuer are required flags]
Aug 12 11:36:01 k8s01 systemd[1]: kube-apiserver.service: Main process exited, code=exited, status=1/FAILURE

Reference:
  kubernetes高可用集群安装(二进制安装、v1.20.2版)
Create the kubeconfig, pointing the server at the HA virtual IP and HAProxy port set up in section 7:
---------------------
#vi /etc/kubernetes/kubeconfig
apiVersion: v1
kind: Config
clusters:
- name: default
  cluster:
    server: https://172.16.10.100:9443
    certificate-authority: /etc/kubernetes/pki/ca.crt
users:
- name: admin
  user:
    client-certificate: /etc/kubernetes/pki/client.crt
    client-key: /etc/kubernetes/pki/client.key
contexts:
- context:
    cluster: default
    user: admin
  name: default
current-context: default
-------------------- test
[root@k8s01 bin]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig cluster-info
Kubernetes control plane is running at https://172.16.10.100:9443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

[root@k8s01 bin]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig get all
NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   169.169.0.1   <none>        443/TCP   26m

Configure kube-controller-manager
#vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://kubernetes.io/docs/

[Service]
EnvironmentFile=/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=always

[Install]
WantedBy=multi-user.target

#vi /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--leader-elect=true \
--service-cluster-ip-range=169.169.0.0/16 \
--service-account-private-key-file=/etc/kubernetes/pki/apiserver.key \
--root-ca-file=/etc/kubernetes/pki/ca.crt \
--log-dir=/var/log/kubernetes --logtostderr=false --v=0"

systemctl start kube-controller-manager && systemctl enable kube-controller-manager
systemctl status kube-controller-manager
Configure kube-scheduler
#vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://kubernetes.io/docs/

[Service]
EnvironmentFile=/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=always

[Install]
WantedBy=multi-user.target

#vi /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--leader-elect=true \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0"

systemctl start kube-scheduler && systemctl enable kube-scheduler
systemctl status kube-scheduler

7. Configure high availability
# HAProxy and keepalived configuration
#vi /etc/kubernetes/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4096
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option                  forwardfor    except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend  kube-apiserver
    mode                 tcp
    bind                 *:9443
    option               tcplog
    default_backend      kube-apiserver

listen stats
    mode                 http
    bind                 *:8888
    stats auth           admin:password
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /stats
    log                  127.0.0.1 local3 err

backend kube-apiserver
    mode        tcp
    balance     roundrobin
    server  k8s01 172.16.10.138:6443 check
    server  k8s02 172.16.10.137:6443 check
    server  k8s03 172.16.10.139:6443 check

------------------
On the two machines 137 and 138, start HAProxy in Docker:
docker run -d --name k8s-haproxy \
  --net=host \
  --restart=always \
  -v /etc/kubernetes/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
  haproxytech/haproxy-debian
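Once the container is up, the stats endpoint can also be checked from the shell (a sketch; the admin:password credentials, port 8888, and /stats URI all come from haproxy.cfg above):

curl -u admin:password http://172.16.10.138:8888/stats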

Test: the stats page should also be reachable in a browser on port 8888.

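The keepalived side runs in Docker too (the test below stops a container named k8s-keepalived). As a minimal sketch of the core configuration, where the VIP 172.16.10.100 (it appears as IP.5 in master_ssl.cnf), the NIC name eno1, and the shared secret are assumptions:

# /etc/keepalived/keepalived.conf on 138 (MASTER); on 137 use state BACKUP and a lower priority
vrrp_instance VI_1 {
    state MASTER
    interface eno1           # NIC that carries the VIP (assumed name)
    virtual_router_id 51
    priority 100             # e.g. 90 on the BACKUP node
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-ha     # assumed shared secret
    }
    virtual_ipaddress {
        172.16.10.100/24     # assumed VIP
    }
}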
Test the keepalived virtual IP.
Stop the service on 138:
docker stop  k8s-keepalived

The virtual IP switches over to 137.

8. Start the node services
Start the kubelet. Remember to change the IP for each node, and start it on all three nodes.
#vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://kubernetes.io/docs/
After=docker.target

[Service]
EnvironmentFile=/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet $KUBELET_ARGS
Restart=always

[Install]
WantedBy=multi-user.target

#vi /etc/kubernetes/kubelet
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig --config=/etc/kubernetes/kubelet.config \
--hostname-override=172.16.10.138 \
--network-plugin=cni \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0"

#vi /etc/kubernetes/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
cgroupDriver: systemd
clusterDNS: ["169.169.0.100"]
clusterDomain: cluster.local
authentication:
  anonymous:
    enabled: true
systemctl enable kubelet && systemctl start kubelet && systemctl status kubelet
Start the kube-proxy. Again, change the IP for each node, and start it on all three nodes.
#vi /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://kubernetes.io/docs/
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy $KUBE_PROXY_ARGS
Restart=always

[Install]
WantedBy=multi-user.target

#vi /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig \
--hostname-override=172.16.10.138 \
--proxy-mode=iptables \
--logtostderr=false --log-dir=/var/log/kubernetes --v=0"
systemctl enable kube-proxy && systemctl start kube-proxy && systemctl status kube-proxy
kubectl --kubeconfig=/etc/kubernetes/kubeconfig get all
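A quick way to confirm the kubelets registered (not in the original); the nodes will report NotReady until the network plugin from the next step is installed:

kubectl --kubeconfig=/etc/kubernetes/kubeconfig get nodes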

9. Configure the network
# Download the calico network manifest
wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f calico.yaml
kubectl --kubeconfig=/etc/kubernetes/kubeconfig get all -n kube-system
kubectl --kubeconfig=/etc/kubernetes/kubeconfig describe pod calico-kube-controllers-58497c65d5-k2n6c -n kube-system

If a pod stays in the Pending state for a long time, check why:
[root@k8s01 kubernetes]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig describe pod calico-node-nc8hz -n kube-system

This was because the pause image could not be downloaded; it has to be pulled separately:
docker pull registry.aliyuncs.com/google_containers/pause:3.5
docker tag registry.aliyuncs.com/google_containers/pause:3.5 k8s.gcr.io/pause:3.5
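An alternative to the per-node pull/tag dance (an option, not what the original did): the dockershim-era kubelet accepts a flag pointing it at a reachable pause image, e.g. added to KUBELET_ARGS in /etc/kubernetes/kubelet:

--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.5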
 
Some node pods were still not READY.

 
Exec into the problem pod to investigate:
kubectl --kubeconfig=/etc/kubernetes/kubeconfig exec -it calico-node-pt8kn -n kube-system -- bash
Inside the pod:
[root@dellR710 /]# cat /etc/calico/confd/config/bird.cfg

The IP above is a bridge IP, but calico's BGP is supposed to use a physical device (a NIC) as the virtual router.
So the cause is almost certainly calico on node 139 detecting the wrong NIC device for BGP.
 
Specify the NICs explicitly. Since my NICs are named eno1, eno2, em1, em2 and the like, several regexes are needed:
# Specify interfaces
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eno.*,em.*"

Modify the pod IP address range:
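As a sketch (the CIDR value here is an assumption): the pod range is normally set by uncommenting the CALICO_IPV4POOL_CIDR variable in the calico-node env section of calico.yaml; it must not overlap the service range 169.169.0.0/16:

            - name: CALICO_IPV4POOL_CIDR
              value: "192.168.0.0/16"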

 
Network troubleshooting references:
  K8S网络异常 calico/node is not ready: BIRD is not ready: BGP not established异常解决
  calico网络故障排查(calico/node is not ready: BIRD is not ready)
  calico/node is not ready: BIRD is not ready: BGP not established (Calico 3.6 / k8s 1.14.1) #2561
  calico多网口配置

10. Configure CoreDNS
Download the CoreDNS deployment YAML and save it as coredns.yaml.

Since clusterDNS is set to 169.169.0.100, the following parts of coredns.yaml need to be adjusted.

kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f coredns.yaml

[root@k8s01 kubernetes]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig logs coredns-675db8b7cc-d4nd9 -n kube-system
/etc/coredns/Corefile:18 - Error during parsing: Unknown directive '}STUBDOMAINS'

[root@k8s01 kubernetes]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig logs coredns-675db8b7cc-7bbjb -n kube-system
plugin/forward: not an IP address or file: "UPSTREAMNAMESERVER"

These errors mean the template placeholders were left in the Corefile. Change them (the parts in red in the original) as reflected in the full YAML below.

---------------------------------------- full YAML ----------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 169.169.0.100
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
-------------------------------------------------------------------------------------------
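To verify that CoreDNS resolves cluster names end to end, a throwaway pod works well (a sketch, not in the original; busybox:1.28 is assumed because its nslookup behaves reliably):

kubectl --kubeconfig=/etc/kubernetes/kubeconfig run dns-test --image=busybox:1.28 --restart=Never -- sleep 3600
kubectl --kubeconfig=/etc/kubernetes/kubeconfig exec -it dns-test -- nslookup kubernetes.default.svc.cluster.local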

11. Test
————————————————-
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-controller
spec:
  replicas: 2
  selector:
    name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-nodeport
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
      protocol: TCP
  type: NodePort
  selector:
    name: nginx
——————————————————-
kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f nginx.yaml

Exec into a pod to test:
[root@k8s01 kubernetes]# kubectl --kubeconfig=/etc/kubernetes/kubeconfig exec -it nginx-controller-8g4vw -- /bin/bash
root@nginx-controller-8g4vw:/# curl
Welcome to nginx!

Test access from a browser.
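The same check works from the command line against any node's IP (the NodePort 30001 is defined in the Service above; the node address here is one of the three machines):

curl http://172.16.10.138:30001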

12. Fix the kubectl problem
[root@k8s01 kubernetes]# kubectl get pod
W0813 14:44:29.729825 1865341 loader.go:221] Config not found: /etc/kubernetes/admin.conf
The connection to the server localhost:8080 was refused - did you specify the right host or port?

cd /etc/kubernetes
# create a symlink
ln -s kubeconfig admin.conf
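An alternative to the symlink (an option, not from the original) is to export KUBECONFIG so kubectl picks the file up in every shell:

echo 'export KUBECONFIG=/etc/kubernetes/kubeconfig' >> ~/.bash_profile
source ~/.bash_profile
kubectl get pod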

systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart kube-scheduler
systemctl restart kubelet
systemctl restart kube-proxy