{"id":1438,"date":"2023-03-23T22:03:52","date_gmt":"2023-03-23T14:03:52","guid":{"rendered":"https:\/\/www.appblog.cn\/?p=1438"},"modified":"2023-04-28T20:59:48","modified_gmt":"2023-04-28T12:59:48","slug":"build-k8s-cluster-using-kubeadm-method-in-centos-7-system","status":"publish","type":"post","link":"https:\/\/www.appblog.cn\/index.php\/2023\/03\/23\/build-k8s-cluster-using-kubeadm-method-in-centos-7-system\/","title":{"rendered":"Centos 7\u7cfb\u7edf\u4e0bKubeadm\u65b9\u5f0f\u642d\u5efak8s\u96c6\u7fa4"},"content":{"rendered":"<h2>Kubernetes\u96c6\u7fa4\u90e8\u7f72\u65b9\u5f0f<\/h2>\n<ul>\n<li><code>Minikube<\/code>\u65b9\u5f0f<\/li>\n<\/ul>\n<p><code>Minikube<\/code>\u662f\u4e00\u4e2a\u5de5\u5177\uff0c\u53ef\u4ee5\u5728\u672c\u5730\u5feb\u901f\u8fd0\u884c\u4e00\u4e2a\u5355\u70b9\u7684Kubernetes\uff0c\u9002\u5408\u5c1d\u8bd5Kubernetes\u6216\u65e5\u5e38\u5f00\u53d1\u7684\u7528\u6237\u4f7f\u7528\uff0c\u4f46\u662f\u4e0d\u80fd\u7528\u4e8e\u751f\u4ea7\u73af\u5883\u3002<\/p>\n<p><!-- more --><\/p>\n<ul>\n<li><code>kubeadm<\/code>\u65b9\u5f0f<\/li>\n<\/ul>\n<p><code>Kubeadm<\/code>\u4e5f\u662f\u4e00\u4e2a\u5de5\u5177\uff0c\u63d0\u4f9b<code>kubeadm init<\/code>\u548c<code>kubeadm join<\/code>\uff0c\u53ef\u7528\u4e8e\u5feb\u901f\u90e8\u7f72Kubernetes\u96c6\u7fa4\u3002<\/p>\n<ul>\n<li>\u4e8c\u8fdb\u5236\u5305\u65b9\u5f0f<\/li>\n<\/ul>\n<p>\u4ece\u5b98\u65b9\u4e0b\u8f7d\u53d1\u884c\u7248\u7684\u4e8c\u8fdb\u5236\u5305\uff0c\u624b\u52a8\u90e8\u7f72\u6bcf\u4e2a\u7ec4\u4ef6\uff0c\u7ec4\u6210Kubernetes\u96c6\u7fa4\uff0c\u8fc7\u7a0b\u8f83\u4e3a\u7e41\u7410\u3002<\/p>\n<p>\u751f\u4ea7\u73af\u5883\u4e2d\u90e8\u7f72Kubernetes\u96c6\u7fa4\uff0c\u4f7f\u7528Kubeadm\u548c\u4e8c\u8fdb\u5236\u5305\u90e8\u7f72\u4e24\u79cd\u65b9\u5f0f\u3002Kubeadm\u90e8\u7f72\u65b9\u5f0f\u964d\u4f4e\u4e86\u90e8\u7f72\u95e8\u69db\uff0c\u4f46\u5c4f\u853d\u4e86\u5f88\u591a\u7ec6\u8282\uff0c\u9047\u5230\u95ee\u9898\u5c31\u5f88\u96be\u6392\u67e5\u3002<br 
\/>\n\u5b9e\u9645\u751f\u4ea7\u73af\u5883\u4e2d\u6700\u597d\u4f7f\u7528\u4e8c\u8fdb\u5236\u5305\u90e8\u7f72Kubernetes\u96c6\u7fa4\uff0c\u867d\u7136\u7e41\u7410\uff0c\u4f46\u6709\u5229\u4e8e\u4e86\u89e3\u5176\u4e2d\u5f88\u591a\u5de5\u4f5c\u539f\u7406\uff0c\u66f4\u6709\u5229\u4e8e\u540e\u671f\u7ef4\u62a4\u3002<\/p>\n<h2>\u57fa\u7840\u73af\u5883\u914d\u7f6e\u8bf4\u660e<\/h2>\n<h3>\u4e3b\u673a\u89d2\u8272\u5b89\u6392<\/h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align: left;\">\u4e3b\u673a\u540d<\/th>\n<th style=\"text-align: left;\">IP<\/th>\n<th style=\"text-align: left;\">\u529f\u80fd<\/th>\n<\/tr>\n<\/thead>\n<tbody>\n<tr>\n<td style=\"text-align: left;\"><strong>k8s-master<\/strong><\/td>\n<td style=\"text-align: left;\">192.168.0.8<\/td>\n<td style=\"text-align: left;\">master<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\"><strong>k8s-node01<\/strong><\/td>\n<td style=\"text-align: left;\">192.168.1.8<\/td>\n<td style=\"text-align: left;\">node<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\"><strong>k8s-node02<\/strong><\/td>\n<td style=\"text-align: left;\">192.168.16.8<\/td>\n<td style=\"text-align: left;\">node<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<h3>\u4e3b\u673a\u914d\u7f6e<\/h3>\n<table>\n<thead>\n<tr>\n<th style=\"text-align: left;\">\u914d\u7f6e<\/th>\n<th style=\"text-align: left;\">\u89c4\u683c<\/th>\n<\/tr>\n<\/thead>\n<tbody>\n<tr>\n<td style=\"text-align: left;\">\u5185\u5b58\u914d\u7f6e<\/td>\n<td style=\"text-align: left;\">2G<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\">CPU\u914d\u7f6e<\/td>\n<td style=\"text-align: left;\">2\u4e2a<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\">\u7cfb\u7edf\u7248\u672c<\/td>\n<td style=\"text-align: left;\">CentOS Linux release 7.6.1810 (Core)<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\">kubelet\u7248\u672c<\/td>\n<td style=\"text-align: left;\">1.14.3<\/td>\n<\/tr>\n<tr>\n<td style=\"text-align: left;\">docker\u7248\u672c<\/td>\n<td style=\"text-align: 
left;\">docker-ce-18.09.6-3.el7.x86_64<\/td>\n<\/tr>\n<\/tbody>\n<\/table>\n<p>\u7cfb\u7edf\u7248\u672c\u8bf4\u660e<\/p>\n<pre><code>[root@k8s-master ~]# cat \/etc\/redhat-release\nCentOS Linux release 7.6.1810 (Core) \n[root@k8s-master ~]# uname -r\n3.10.0-957.el7.x86_64\n[root@k8s-master ~]# getenforce\nDisabled\n[root@k8s-master ~]# systemctl status firewalld.service\n\u25cf firewalld.service - firewalld - dynamic firewall daemon\n   Loaded: loaded (\/usr\/lib\/systemd\/system\/firewalld.service; disabled; vendor preset: enabled)\n   Active: inactive (dead)\n     Docs: man:firewalld(1)\n[root@k8s-master ~]# <\/code><\/pre>\n<h2>\u90e8\u7f72\u6b65\u9aa4<\/h2>\n<h3>\u6240\u6709\u8282\u70b9\u7684\u57fa\u7840\u914d\u7f6e\uff08\u672c\u8282\u5728master\u4e0enode\u540c\u65f6\u6267\u884c\uff09<\/h3>\n<h4>\u4fee\u6539\u4e3b\u673a\u540d<\/h4>\n<p>192.168.0.8\u670d\u52a1\u5668\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# hostnamectl set-hostname k8s-master\n[root@k8s-master ~]# hostname\nk8s-master<\/code><\/pre>\n<p>192.168.1.8\u670d\u52a1\u5668\uff1a<\/p>\n<pre><code>[root@k8s-node01 ~]# hostnamectl set-hostname k8s-node01\n[root@k8s-node01 ~]# hostname\nk8s-node01<\/code><\/pre>\n<p>192.168.16.8\u670d\u52a1\u5668\uff1a<\/p>\n<pre><code>[root@k8s-node01 ~]# hostnamectl set-hostname k8s-node02\n[root@k8s-node01 ~]# hostname\nk8s-node02<\/code><\/pre>\n<h4>\u4fee\u6539<code>\/etc\/hosts<\/code>\u6587\u4ef6\uff0c\u52a0\u5165\u4e0b\u9762\u4e09\u884c<\/h4>\n<pre><code>192.168.0.8 k8s-master\n192.168.1.8 k8s-node01\n192.168.16.8 k8s-node02<\/code><\/pre>\n<h4>\u5173\u6389swap\u5206\u533a<\/h4>\n<pre><code>[root@k8s-master ~]# swapoff -a<\/code><\/pre>\n<p>\u6c38\u4e45\u7981\u7528swap\u5206\u533a\uff1a\u6ce8\u91ca\u6389<code>\/etc\/fstab<\/code>\u6587\u4ef6\u4e2d<code>\/dev\/mapper\/centos-swap<\/code>\u8fd9\u4e00\u884c\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# cat \/etc\/fstab\n\n#\n# \/etc\/fstab\n# Created by anaconda on Tue Jul 31 23:03:49 2018\n#\n# Accessible 
filesystems, by reference, are maintained under &#039;\/dev\/disk&#039;\n# See man pages fstab(5), findfs(8), mount(8) and\/or blkid(8) for more info\n#\n\/dev\/mapper\/centos-root \/                       xfs     defaults        0 0\nUUID=ad34d4f1-a758-4924-8ae9-99d0d36939aa \/boot                   xfs     defaults        0 0\n#\/dev\/mapper\/centos-swap swap                    swap    defaults        0 0    #\u6ce8\u91ca\u6389\u8fd9\u4e00\u884c\n[root@k8s-master ~]# <\/code><\/pre>\n<h4>\u5173\u95edfirewalld<\/h4>\n<pre><code>[root@k8s-master ~]# systemctl stop firewalld\n[root@k8s-master ~]# systemctl disable firewalld<\/code><\/pre>\n<h4>\u5173\u95edselinux<\/h4>\n<pre><code>[root@k8s-master selinux]# cat \/etc\/selinux\/config|grep &quot;^SELINUX=&quot;\nSELINUX=disabled\n[root@k8s-master selinux]# <\/code><\/pre>\n<h4>\u4fee\u6539sysctl\u5185\u6838\u53c2\u6570<\/h4>\n<p>\u521b\u5efak8s.conf\u6587\u4ef6\uff0c\u5199\u5165\u4e0b\u9762\u7684\u5185\u5bb9\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# cat \/etc\/sysctl.d\/k8s.conf      #k8s.conf\u6587\u4ef6\u539f\u6765\u4e0d\u5b58\u5728\uff0c\u9700\u8981\u81ea\u5df1\u521b\u5efa\u7684\n\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.bridge.bridge-nf-call-iptables = 1\nvm.swappiness=0\n\n[root@k8s-master ~]# sysctl --system      #\u8ba9\u53c2\u6570\u4fee\u6539\u751f\u6548<\/code><\/pre>\n<h4>\u914d\u7f6ekubernetes yum\u6e90<\/h4>\n<p>\u521b\u5efa<code>kubernetes.repo<\/code>\u6587\u4ef6\u5e76\u5199\u5165\u4e0b\u9762\u7684\u5185\u5bb9\uff1a<\/p>\n<pre><code># vim \/etc\/yum.repos.d\/kubernetes.repo\n\n[kubernetes]\nname=Kubernetes\nbaseurl=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/repos\/kubernetes-el7-x86_64\/\nenabled=1\ngpgcheck=1\nrepo_gpgcheck=1\ngpgkey=https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/yum-key.gpg 
https:\/\/mirrors.aliyun.com\/kubernetes\/yum\/doc\/rpm-package-key.gpg<\/code><\/pre>\n<h4>\u5b89\u88c5docker18\u7248\u672c<\/h4>\n<p>Centos7\u5b89\u88c5\u7684docker\u4e00\u822c\u662fdocker13\u7248\u672c\uff0c\u8fd9\u91cc\u662f\u5c06docker\u7248\u672c\u5347\u7ea7\u5230\u6700\u65b0\u7248\u3002\u6b65\u9aa4\u5982\u4e0b\uff1a<\/p>\n<p>\uff081\uff09\u4fdd\u8bc1\u5185\u6838\u7248\u672c\u57283.10\u53ca\u4ee5\u4e0a\uff1a<code>uname -a<\/code><\/p>\n<p>\uff082\uff09\u5220\u9664\u65e7\u7248\u672c\uff1a<code>yum remove -y docker docker-common docker-selinux docker-engine<\/code> #\u8fd9\u4e00\u6b65\u9aa4\u5728\u521d\u6b21\u5b89\u88c5docker\u4e5f\u6700\u597d\u6267\u884c\u4e00\u6b21\uff0c\u5426\u5219\u540e\u9762\u5b89\u88c5docker\u53ef\u80fd\u4f1a\u62a5\u9519<\/p>\n<p>\uff083\uff09\u5b89\u88c5\u9700\u8981\u7684\u8f6f\u4ef6\u5305\uff1a<code>yum install -y yum-utils device-mapper-persistent-data lvm2<\/code><\/p>\n<p>\uff084\uff09\u8bbe\u7f6eDocker yum\u6e90\uff1a<code>yum-config-manager --add-repo https:\/\/download.docker.com\/linux\/centos\/docker-ce.repo<\/code><\/p>\n<p>\uff085\uff09\u67e5\u770b\u6240\u6709\u4ed3\u5e93\u4e2d\u6240\u6709docker\u7248\u672c\uff1a<code>yum list docker-ce --showduplicates | sort -r<\/code><\/p>\n<p>\uff086\uff09\u5b89\u88c5docker\uff1a<code>yum install docker-ce -y<\/code> #\u7531\u4e8erepo\u4e2d\u9ed8\u8ba4\u53ea\u5f00\u542fstable\u4ed3\u5e93\uff0c\u6545\u8fd9\u91cc\u4e00\u822c\u4f1a\u9ed8\u8ba4\u5b89\u88c5\u6700\u65b0\u7248\u672c\u3002\u5982\u679c\u8981\u5b89\u88c5\u7279\u5b9a\u7248\u672c\uff1a<code>yum install docker-ce-18.06.3.ce-3.el7 -y<\/code><\/p>\n<p>\uff087\uff09\u8bbe\u7f6e\u4e3a\u5f00\u673a\u542f\u52a8\uff1a<code>systemctl enable docker<\/code><\/p>\n<p>\uff088\uff09\u542f\u52a8\uff1a<code>systemctl start docker<\/code><\/p>\n<p>\uff089\uff09\u67e5\u770b\u542f\u52a8\u72b6\u6001\uff1a<code>systemctl status docker<\/code><\/p>\n<p>\uff0810\uff09\u67e5\u770b\u7248\u672c\uff1a<code>docker version<\/code><\/p>\n<h4>\u5b89\u88c5kubeadm, 
kubelet and kubectl<\/h4>\n<p>Kubelet\u8d1f\u8d23\u4e0e\u5176\u4ed6\u8282\u70b9\u96c6\u7fa4\u901a\u4fe1\uff0c\u5e76\u8fdb\u884c\u672c\u8282\u70b9Pod\u548c\u5bb9\u5668\u751f\u547d\u5468\u671f\u7684\u7ba1\u7406<\/p>\n<pre><code># yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes\n# kubelet --version\n# kubeadm version\n# kubectl version<\/code><\/pre>\n<p>\u68c0\u67e5\u6240\u6709\u670d\u52a1\u7248\u672c\uff1a<\/p>\n<pre><code># rpm -qa docker-ce kubelet kubeadm kubectl kubernetes-cni\ndocker-ce-18.09.6-3.el7.x86_64\nkubernetes-cni-0.7.5-0.x86_64\nkubeadm-1.15.0-0.x86_64\nkubelet-1.15.0-0.x86_64\nkubectl-1.15.0-0.x86_64<\/code><\/pre>\n<h4>\u542f\u52a8docker\u548ckubelet\u5e76\u8bbe\u7f6e\u4e3a\u5f00\u673a\u81ea\u542f\u52a8<\/h4>\n<pre><code>systemctl enable docker\nsystemctl enable kubelet\nsystemctl start docker\nsystemctl start kubelet<\/code><\/pre>\n<h4>\u4e0b\u8f7d\u76f8\u5173\u955c\u50cf<\/h4>\n<p>\uff081\uff09\u83b7\u53d6\u955c\u50cf\u5217\u8868<\/p>\n<pre><code>[root@k8s-master ~]# kubeadm config images list\nW0622 14:33:02.478513    3581 version.go:98] could not fetch a Kubernetes version from the internet: unable to get URL &quot;https:\/\/dl.k8s.io\/release\/stable-1.txt&quot;: Get https:\/\/dl.k8s.io\/release\/stable-1.txt: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\nW0622 14:33:02.478589    3581 version.go:99] falling back to the local client version: v1.15.0\nk8s.gcr.io\/kube-apiserver:v1.15.0\nk8s.gcr.io\/kube-controller-manager:v1.15.0\nk8s.gcr.io\/kube-scheduler:v1.15.0\nk8s.gcr.io\/kube-proxy:v1.15.0\nk8s.gcr.io\/pause:3.1\nk8s.gcr.io\/etcd:3.3.10\nk8s.gcr.io\/coredns:1.3.1<\/code><\/pre>\n<p>\u82e5\u62a5\u5982\u4e0b\u9519\u8bef\uff0c\u5219\u4fee\u6539\u4e3b\u673a\u540d<\/p>\n<pre><code>could not convert cfg to an internal cfg: name: Invalid value: &quot;vm_0_8_centos&quot;: a DNS-1123 subdomain must consist of lower case alphanumeric characters, &#039;-&#039; or 
&#039;.&#039;, and must start and end with an alphanumeric character (e.g. &#039;example.com&#039;, regex used for validation is &#039;[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*&#039;)<\/code><\/pre>\n<p>\uff082\uff09\u751f\u6210\u9ed8\u8ba4kubeadm.conf\u6587\u4ef6<\/p>\n<pre><code># kubeadm config print init-defaults &gt; kubeadm.conf<\/code><\/pre>\n<p>\uff083\uff09\u4fee\u6539kubeadm.conf\u6587\u4ef6\u7684\u955c\u50cf\u5730\u5740<\/p>\n<p>\u9ed8\u8ba4\u4e3agoogle\u7684\u955c\u50cf\u4ed3\u5e93\u5730\u5740<code>k8s.gcr.io<\/code>\uff0c\u56fd\u5185\u65e0\u6cd5\u8bbf\u95ee\uff0c\u9700\u8981\u628a\u5730\u5740\u4fee\u6539\u4e3a\u56fd\u5185\u7684\u5730\u5740\uff0c\u8fd9\u91cc\u4f7f\u7528\u963f\u91cc\u4e91\u7684\u955c\u50cf\u4ed3\u5e93\u5730\u5740\u3002<br \/>\n\u7f16\u8f91<code>kubeadm.conf<\/code>\uff0c\u5c06<code>imageRepository<\/code>\u4fee\u6539\u4e3a<code>registry.aliyuncs.com\/google_containers<\/code>\u3002\u5e76\u786e\u8ba4Kubernetes\u7248\u672c\u662fv1.15.0\uff0c\u548c\u524d\u6587\u4e2d\u7684\u955c\u50cf\u5217\u8868\u7684\u7248\u672c\u4fdd\u6301\u4e00\u81f4<\/p>\n<pre><code># vim kubeadm.conf\n\napiVersion: kubeadm.k8s.io\/v1beta1\nbootstrapTokens:\n- groups:\n  - system:bootstrappers:kubeadm:default-node-token\n  token: abcdef.0123456789abcdef\n  ttl: 24h0m0s\n  usages:\n  - signing\n  - authentication\nkind: InitConfiguration\nlocalAPIEndpoint:\n  advertiseAddress: 1.2.3.4\n  bindPort: 6443\nnodeRegistration:\n  criSocket: \/var\/run\/dockershim.sock\n  name: k8s-master\n  taints:\n  - effect: NoSchedule\n    key: node-role.kubernetes.io\/master\n---\napiServer:\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io\/v1beta1\ncertificatesDir: \/etc\/kubernetes\/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: &quot;&quot;\ncontrollerManager: {}\ndns:\n  type: CoreDNS\netcd:\n  local:\n    dataDir: \/var\/lib\/etcd\n#imageRepository: k8s.gcr.io\nimageRepository: registry.aliyuncs.com\/google_containers\nkind: 
ClusterConfiguration\nkubernetesVersion: v1.15.0\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: &quot;&quot;\n  serviceSubnet: 10.96.0.0\/12\nscheduler: {}<\/code><\/pre>\n<p>\uff084\uff09\u4e0b\u8f7d\u955c\u50cf\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubeadm config images pull --config kubeadm.conf\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/kube-apiserver:v1.15.0\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/kube-controller-manager:v1.15.0\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/kube-scheduler:v1.15.0\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/kube-proxy:v1.15.0\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/pause:3.1\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/etcd:3.3.10\n[config\/images] Pulled registry.aliyuncs.com\/google_containers\/coredns:1.3.1<\/code><\/pre>\n<p>\uff085\uff09\u4fee\u6539tag<\/p>\n<pre><code>docker tag registry.aliyuncs.com\/google_containers\/kube-apiserver:v1.15.0 k8s.gcr.io\/kube-apiserver:v1.15.0\ndocker tag registry.aliyuncs.com\/google_containers\/kube-controller-manager:v1.15.0 k8s.gcr.io\/kube-controller-manager:v1.15.0\ndocker tag registry.aliyuncs.com\/google_containers\/kube-scheduler:v1.15.0 k8s.gcr.io\/kube-scheduler:v1.15.0\ndocker tag registry.aliyuncs.com\/google_containers\/kube-proxy:v1.15.0 k8s.gcr.io\/kube-proxy:v1.15.0\ndocker tag registry.aliyuncs.com\/google_containers\/pause:3.1 k8s.gcr.io\/pause:3.1\ndocker tag registry.aliyuncs.com\/google_containers\/etcd:3.3.10 k8s.gcr.io\/etcd:3.3.10\ndocker tag registry.aliyuncs.com\/google_containers\/coredns:1.3.1 k8s.gcr.io\/coredns:1.3.1<\/code><\/pre>\n<p>\uff086\uff09\u518d\u5220\u9664\u963f\u91cc\u4e91\u955c\u50cf\uff1a<\/p>\n<pre><code>docker rmi registry.aliyuncs.com\/google_containers\/kube-apiserver:v1.15.0\ndocker rmi registry.aliyuncs.com\/google_containers\/kube-controller-manager:v1.15.0\ndocker rmi 
registry.aliyuncs.com\/google_containers\/kube-scheduler:v1.15.0\ndocker rmi registry.aliyuncs.com\/google_containers\/kube-proxy:v1.15.0\ndocker rmi registry.aliyuncs.com\/google_containers\/pause:3.1\ndocker rmi registry.aliyuncs.com\/google_containers\/etcd:3.3.10\ndocker rmi registry.aliyuncs.com\/google_containers\/coredns:1.3.1<\/code><\/pre>\n<p>\uff087\uff09\u6216\u8005\u4f7f\u7528\u811a\u672c\u89e3\u51b3\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# cat image.sh\n#!\/bin\/bash\nimages=(kube-proxy:v1.15.0 kube-scheduler:v1.15.0 kube-controller-manager:v1.15.0 kube-apiserver:v1.15.0 etcd:3.3.10 coredns:1.3.1 pause:3.1 )\nfor imageName in ${images[@]} ; do\ndocker pull registry.aliyuncs.com\/google_containers\/$imageName\ndocker tag  registry.aliyuncs.com\/google_containers\/$imageName k8s.gcr.io\/$imageName\ndocker rmi  registry.aliyuncs.com\/google_containers\/$imageName\ndone<\/code><\/pre>\n<p>\uff088\uff09\u6700\u540e\u4e0b\u8f7d\u7559\u4e0b\u7684\u955c\u50cf\u662f\u8fd9\u4e9b\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# docker images\nREPOSITORY                           TAG                 IMAGE ID            CREATED             SIZE\nk8s.gcr.io\/kube-proxy                v1.15.0             d235b23c3570        2 days ago          82.4MB\nk8s.gcr.io\/kube-apiserver            v1.15.0             201c7a840312        2 days ago          207MB\nk8s.gcr.io\/kube-controller-manager   v1.15.0             8328bb49b652        2 days ago          159MB\nk8s.gcr.io\/kube-scheduler            v1.15.0             2d3813851e87        2 days ago          81.1MB\nk8s.gcr.io\/coredns                   1.3.1               eb516548c180        5 months ago        40.3MB\nk8s.gcr.io\/etcd                      3.3.10              2c4adeb21b4f        6 months ago        258MB\nk8s.gcr.io\/pause                     3.1                 da86e6ba6ca1        18 months ago       742kB\n[root@k8s-master ~]# 
<\/code><\/pre>\n<h4>\u5ffd\u7565swap\u9519\u8bef<\/h4>\n<p>kubernetes\u96c6\u7fa4\u4e0d\u5141\u8bb8\u5f00\u542fswap\uff0c\u6240\u4ee5\u6211\u4eec\u9700\u8981\u5ffd\u7565\u8fd9\u4e2a\u9519\u8bef<br \/>\n\u7f16\u8f91\u6587\u4ef6 <code>\/etc\/sysconfig\/kubelet<\/code>\uff0c\u5c06\u6587\u4ef6\u91cc\u7684<code>KUBELET_EXTRA_ARGS=<\/code>\u6539\u6210\uff1a<code>KUBELET_EXTRA_ARGS=&quot;--fail-swap-on=false&quot;<\/code><\/p>\n<p>\u4fee\u6539\u4e4b\u540e\u7684\u6587\u4ef6\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# cat \/etc\/sysconfig\/kubelet\nKUBELET_EXTRA_ARGS=&quot;--fail-swap-on=false&quot;\n[root@k8s-master ~]# <\/code><\/pre>\n<h2>master\u8282\u70b9\u90e8\u7f72\uff08\u672c\u8282\u5728master\u8282\u70b9\u6267\u884c\uff09<\/h2>\n<h3>\u521d\u59cb\u5316Kubernetes Master<\/h3>\n<p>\u6839\u636e\u540e\u9762\u8981\u5b89\u88c5\u7684Calico\u7f51\u7edc\u7ec4\u4ef6\uff1a<a target=\"_blank\" rel=\"noopener\" href=\"https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network\">https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network<\/a><\/p>\n<p>\u8fd9\u91cc\u5b9a\u4e49\u5148POD\u7684\u7f51\u6bb5\u4e3a: <code>172.16.0.0\/16<\/code>\uff0cAPI Server\u5730\u5740\u4e3aMaster\u8282\u70b9\u7684IP\u5730\u5740\u3002\u547d\u4ee4\uff1a<\/p>\n<pre><code>kubeadm init --kubernetes-version=v1.15.0 --pod-network-cidr=172.16.0.0\/16 --apiserver-advertise-address=192.168.0.8<\/code><\/pre>\n<p>\u6267\u884c\u7ed3\u679c\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubeadm init --kubernetes-version=v1.15.0 --pod-network-cidr=172.16.0.0\/16 --apiserver-advertise-address=192.168.0.8\n[init] Using Kubernetes version: v1.15.0\n[preflight] Running pre-flight checks\n    [WARNING IsDockerSystemdCheck]: detected &quot;cgroupfs&quot; as the Docker cgroup driver. The recommended driver is &quot;systemd&quot;. 
Please follow the guide at https:\/\/kubernetes.io\/docs\/setup\/cri\/\n[preflight] Pulling images required for setting up a Kubernetes cluster\n[preflight] This might take a minute or two, depending on the speed of your internet connection\n[preflight] You can also perform this action in beforehand using &#039;kubeadm config images pull&#039;\n[kubelet-start] Writing kubelet environment file with flags to file &quot;\/var\/lib\/kubelet\/kubeadm-flags.env&quot;\n[kubelet-start] Writing kubelet configuration to file &quot;\/var\/lib\/kubelet\/config.yaml&quot;\n[kubelet-start] Activating the kubelet service\n[certs] Using certificateDir folder &quot;\/etc\/kubernetes\/pki&quot;\n[certs] Generating &quot;etcd\/ca&quot; certificate and key\n[certs] Generating &quot;etcd\/healthcheck-client&quot; certificate and key\n[certs] Generating &quot;apiserver-etcd-client&quot; certificate and key\n[certs] Generating &quot;etcd\/server&quot; certificate and key\n[certs] etcd\/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.0.8 127.0.0.1 ::1]\n[certs] Generating &quot;etcd\/peer&quot; certificate and key\n[certs] etcd\/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.0.8 127.0.0.1 ::1]\n[certs] Generating &quot;ca&quot; certificate and key\n[certs] Generating &quot;apiserver&quot; certificate and key\n[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.8]\n[certs] Generating &quot;apiserver-kubelet-client&quot; certificate and key\n[certs] Generating &quot;front-proxy-ca&quot; certificate and key\n[certs] Generating &quot;front-proxy-client&quot; certificate and key\n[certs] Generating &quot;sa&quot; key and public key\n[kubeconfig] Using kubeconfig folder &quot;\/etc\/kubernetes&quot;\n[kubeconfig] Writing &quot;admin.conf&quot; kubeconfig file\n[kubeconfig] Writing 
&quot;kubelet.conf&quot; kubeconfig file\n[kubeconfig] Writing &quot;controller-manager.conf&quot; kubeconfig file\n[kubeconfig] Writing &quot;scheduler.conf&quot; kubeconfig file\n[control-plane] Using manifest folder &quot;\/etc\/kubernetes\/manifests&quot;\n[control-plane] Creating static Pod manifest for &quot;kube-apiserver&quot;\n[control-plane] Creating static Pod manifest for &quot;kube-controller-manager&quot;\n[control-plane] Creating static Pod manifest for &quot;kube-scheduler&quot;\n[etcd] Creating static Pod manifest for local etcd in &quot;\/etc\/kubernetes\/manifests&quot;\n[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory &quot;\/etc\/kubernetes\/manifests&quot;. This can take up to 4m0s\n[kubelet-check] Initial timeout of 40s passed.\n[apiclient] All control plane components are healthy after 40.013221 seconds\n[upload-config] Storing the configuration used in ConfigMap &quot;kubeadm-config&quot; in the &quot;kube-system&quot; Namespace\n[kubelet] Creating a ConfigMap &quot;kubelet-config-1.15&quot; in namespace kube-system with the configuration for the kubelets in the cluster\n[upload-certs] Skipping phase. 
Please see --upload-certs\n[mark-control-plane] Marking the node k8s-master as control-plane by adding the label &quot;node-role.kubernetes.io\/master=&#039;&#039;&quot;\n[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io\/master:NoSchedule]\n[bootstrap-token] Using token: aakoqt.zq79agckuzc5wt29\n[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles\n[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials\n[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token\n[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster\n[bootstrap-token] Creating the &quot;cluster-info&quot; ConfigMap in the &quot;kube-public&quot; namespace\n[addons] Applied essential addon: CoreDNS\n[addons] Applied essential addon: kube-proxy\n\nYour Kubernetes control-plane has initialized successfully!\n\nTo start using your cluster, you need to run the following as a regular user:\n\n  mkdir -p $HOME\/.kube\n  sudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\n  sudo chown $(id -u):$(id -g) $HOME\/.kube\/config\n\nYou should now deploy a pod network to the cluster.\nRun &quot;kubectl apply -f [podnetwork].yaml&quot; with one of the options listed at:\n  https:\/\/kubernetes.io\/docs\/concepts\/cluster-administration\/addons\/\n\nThen you can join any number of worker nodes by running the following on each as root:\n\nkubeadm join 192.168.0.8:6443 --token aakoqt.zq79agckuzc5wt29 \\\n    --discovery-token-ca-cert-hash sha256:fa781d59e6e69d1a8abb836f66e3d36fd2d3e1765a1afdb71b1a18af807585c2 \n[root@k8s-master ~]# 
<\/code><\/pre>\n<p>\u521d\u59cb\u5316\u6210\u529f\u540e\uff0c\u5c06\u6700\u540e\u4e24\u884c\u5185\u5bb9\u8bb0\u5f55\u4e0b\u6765\uff0c\u8fd9\u4e2a\u547d\u4ee4\u7528\u6765\u52a0\u5165Worker\u8282\u70b9\u65f6\u4f7f\u7528\u3002<\/p>\n<pre><code>kubeadm join 192.168.0.8:6443 --token aakoqt.zq79agckuzc5wt29 \\\n    --discovery-token-ca-cert-hash sha256:fa781d59e6e69d1a8abb836f66e3d36fd2d3e1765a1afdb71b1a18af807585c2<\/code><\/pre>\n<p>\u82e5\u62a5\u5982\u4e0b\u9519\u8bef\uff1a<\/p>\n<pre><code>[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: \/proc\/sys\/net\/bridge\/bridge-nf-call-iptables contents are not set to 1<\/code><\/pre>\n<p>\u89e3\u51b3\uff1a<\/p>\n<pre><code># echo &quot;1&quot; &gt;\/proc\/sys\/net\/bridge\/bridge-nf-call-iptables<\/code><\/pre>\n<h3>\u914d\u7f6ekubectl\uff0c\u4f5c\u4e3a\u666e\u901a\u7528\u6237\u7ba1\u7406\u96c6\u7fa4\u5e76\u5728\u96c6\u7fa4\u4e0a\u5de5\u4f5c<\/h3>\n<p>\u4e0a\u4e00\u6b65\u9aa4\u521d\u59cb\u5316\u7684\u8981\u6c42\uff1a\u201cTo start using your cluster, you need to run the following as a regular user\u201d\uff0c\u9700\u8981\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a<\/p>\n<pre><code>mkdir -p $HOME\/.kube\nsudo cp -i \/etc\/kubernetes\/admin.conf $HOME\/.kube\/config\nsudo chown $(id -u):$(id -g) $HOME\/.kube\/config<\/code><\/pre>\n<h3>\u83b7\u53d6pods\u5217\u8868<\/h3>\n<p><code>kubectl get pods --all-namespaces<\/code>\u547d\u4ee4\u67e5\u770b\u76f8\u5173\u72b6\u6001\uff0c\u53ef\u4ee5\u770b\u5230<code>coredns pod<\/code>\u5904\u4e8epending\u72b6\u6001\uff0c\u8fd9\u662f\u56e0\u4e3a\u8fd8\u6ca1\u6709\u90e8\u7f72pod\u7f51\u7edc\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get pods --all-namespaces\nNAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE\nkube-system   coredns-fb8b8dccf-twknr              0\/1     Pending   0          3m25s\nkube-system   coredns-fb8b8dccf-wc4pd              0\/1     Pending   0          3m25s\nkube-system   etcd-k8s-master                      
1\/1     Running   0          2m25s\nkube-system   kube-apiserver-k8s-master            1\/1     Running   0          2m30s\nkube-system   kube-controller-manager-k8s-master   1\/1     Running   0          2m27s\nkube-system   kube-proxy-b298g                     1\/1     Running   0          3m25s\nkube-system   kube-scheduler-k8s-master            1\/1     Running   0          2m37s<\/code><\/pre>\n<h3>\u67e5\u770b\u96c6\u7fa4\u7684\u5065\u5eb7\u72b6\u6001<\/h3>\n<p><code>kubectl get cs<\/code>\u547d\u4ee4\u67e5\u770b\u5065\u5eb7\u72b6\u6001\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get cs\nNAME                 STATUS    MESSAGE             ERROR\nscheduler            Healthy   ok                  \ncontroller-manager   Healthy   ok                  \netcd-0               Healthy   {&quot;health&quot;:&quot;true&quot;} <\/code><\/pre>\n<h3>\u90e8\u7f72Pod\u7f51\u7edc<\/h3>\n<p>\u6839\u636ePod Network Add-on\u4ecb\u7ecd\uff1a<\/p>\n<blockquote>\n<p>You must install a pod network add-on so that your pods can communicate with each other.<br \/>\n\u60a8\u5fc5\u987b\u5b89\u88c5\u4e00\u4e2apod\u7f51\u7edc\u9644\u52a0\u7ec4\u4ef6\uff0c\u4ee5\u4fbf\u60a8\u7684pod\u53ef\u4ee5\u5f7c\u6b64\u901a\u4fe1\u3002<br \/>\nThe network must be deployed before any applications. Also, CoreDNS will not start up before a network is installed. 
kubeadm only supports Container Network Interface (CNI) based networks (and does not support kubenet).<br \/>\npod\u7f51\u7edc\u9644\u52a0\u7ec4\u4ef6\u662f\u5fc5\u987b\u5b89\u88c5\u7684\uff0c\u8fd9\u6837pod\u80fd\u591f\u5f7c\u6b64\u901a\u4fe1\uff0c\u800c\u4e14\u7f51\u7edc\u5fc5\u987b\u5728\u4efb\u4f55\u5e94\u7528\u7a0b\u5e8f\u4e4b\u524d\u90e8\u7f72\u3002\u53e6\u5916\uff0cCoreDNS\u5728\u5b89\u88c5\u7f51\u7edc\u4e4b\u524d\u4e0d\u4f1a\u542f\u52a8\u3002kubeadm\u53ea\u652f\u6301\u57fa\u4e8e\u5bb9\u5668\u7f51\u7edc\u63a5\u53e3(CNI)\u7684\u7f51\u7edc\u3002<\/p>\n<\/blockquote>\n<p>\u5982\u4e0b\u56fe\u652f\u6301\u7684Pod\u7f51\u7edc\u6709<code>JuniperContrail\/TungstenFabric<\/code>\u3001<code>Calico<\/code>\u3001<code>Canal<\/code>\u3001<code>Cilium<\/code>\u3001<code>Flannel<\/code>\u3001<code>Kube-router<\/code>\u3001<code>Romana<\/code>\u3001<code>Weave Net<\/code>\u7b49\uff1a<\/p>\n<p>\u8fd9\u91cc\u6211\u4eec\u90e8\u7f72Calico\u7f51\u7edc\uff0cCalico\u662f\u4e00\u4e2a\u7eaf\u4e09\u5c42\u7684\u65b9\u6848\uff0c\u5176\u597d\u5904\u662f\u5b83\u6574\u5408\u4e86\u5404\u79cd\u4e91\u539f\u751f\u5e73\u53f0(Docker\u3001Mesos \u4e0e OpenStack \u7b49)\uff0c\u6bcf\u4e2a Kubernetes \u8282\u70b9\u4e0a\u901a\u8fc7 Linux Kernel \u73b0\u6709\u7684 L3 forwarding \u529f\u80fd\u6765\u5b9e\u73b0 vRouter \u529f\u80fd\u3002<\/p>\n<p>\u6839\u636ePod Network Add-on\u63d0\u793a\uff0c\u5b89\u88c5Calico\u7f51\u7edc\u5c31\u4e24\u4e2a\u6b65\u9aa4\uff1a<\/p>\n<p><a target=\"_blank\" rel=\"noopener\" href=\"https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network\">https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network<\/a><\/p>\n<pre><code>wget https:\/\/docs.projectcalico.org\/v3.3\/getting-started\/kubernetes\/installation\/hosted\/rbac-kdd.yaml\nwget https:\/\/docs.projectcalico.org\/v3.3\/getting-started\/kubernetes\/installation\/hosted\/kubernetes-datastore\/calico-networking\/1.7\/calico.yaml\ncurl 
https:\/\/docs.projectcalico.org\/v3.4\/getting-started\/kubernetes\/installation\/hosted\/calico.yaml -O<\/code><\/pre>\n<p>\u5982\u679c<code>pod CIDR<\/code>\u5373<code>pod-network-cidr<\/code>\u4f7f\u7528\u7684\u662f 192.168.0.0\/16\uff0c\u53ef\u4ee5\u8df3\u8fc7\uff0c\u5426\u5219\u6267\u884c\u4e0b\u9762\u547d\u4ee4\uff0c\u66f4\u65b0\u4e00\u4e0b<code>pod CIDR<\/code>\u7684\u914d\u7f6e\u4fe1\u606f\uff1a<\/p>\n<pre><code>POD_CIDR=&quot;&lt;your-pod-cidr&gt;&quot; \\\nsed -i -e &quot;s?192.168.0.0\/16?$POD_CIDR?g&quot; calico.yaml<\/code><\/pre>\n<p>\u6216\u76f4\u63a5\u7f16\u8f91<code>calico.yaml<\/code><\/p>\n<pre><code># The default IPv4 pool to create on startup if none exists. Pod IPs will be\n# chosen from this range. Changing this value after installation will have\n# no effect. This should fall within `--cluster-cidr`.\n- name: CALICO_IPV4POOL_CIDR\n  value: &quot;172.16.0.0\/16&quot;<\/code><\/pre>\n<pre><code>kubectl apply -f rbac-kdd.yaml\nkubectl apply -f calico.yaml<\/code><\/pre>\n<p>\u8fd9\u4e24\u4e2a\u6b65\u9aa4\u7684\u6267\u884c\u7ed3\u679c\u5982\u4e0b\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl apply -f rbac-kdd.yaml\nclusterrole.rbac.authorization.k8s.io\/calico-node created\nclusterrolebinding.rbac.authorization.k8s.io\/calico-node created\n[root@k8s-master ~]# kubectl apply -f calico.yaml\nconfigmap\/calico-config created\nservice\/calico-typha created\ndeployment.apps\/calico-typha created\npoddisruptionbudget.policy\/calico-typha created\ndaemonset.extensions\/calico-node created\nserviceaccount\/calico-node created\ncustomresourcedefinition.apiextensions.k8s.io\/felixconfigurations.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/bgppeers.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/bgpconfigurations.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/ippools.crd.projectcalico.org 
created\ncustomresourcedefinition.apiextensions.k8s.io\/hostendpoints.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/clusterinformations.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/globalnetworkpolicies.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/globalnetworksets.crd.projectcalico.org created\ncustomresourcedefinition.apiextensions.k8s.io\/networkpolicies.crd.projectcalico.org created\n[root@k8s-master ~]# <\/code><\/pre>\n<p>\u4f7f\u7528<code>kubectl get pods --all-namespaces<\/code>\u547d\u4ee4\u67e5\u770b\u8fd0\u884c\u72b6\u6001<\/p>\n<p>\u8fd8\u6ca1\u90e8\u7f72\u597d\u7684\u65f6\u5019\uff0c\u72b6\u6001\u662f\u8fd9\u6837\u7684\uff0c\u9700\u8981\u7b49\u4e2a\u51e0\u5206\u949f\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get pods --all-namespaces\nNAMESPACE     NAME                                 READY   STATUS              RESTARTS   AGE\nkube-system   calico-node-nzqzl                    0\/2     ContainerCreating   0          82s\nkube-system   coredns-fb8b8dccf-twknr              0\/1     Pending             0          83m\nkube-system   coredns-fb8b8dccf-wc4pd              0\/1     Pending             0          83m\nkube-system   etcd-k8s-master                      1\/1     Running             0          82m\nkube-system   kube-apiserver-k8s-master            1\/1     Running             0          82m\nkube-system   kube-controller-manager-k8s-master   1\/1     Running             0          82m\nkube-system   kube-proxy-b298g                     1\/1     Running             0          83m\nkube-system   kube-scheduler-k8s-master            1\/1     Running             0          82m<\/code><\/pre>\n<p>\u51e0\u5206\u949f\u4e4b\u540e\uff0c\u6240\u6709\u5bb9\u5668\u5c31\u53d8\u6210\u4e86running\u72b6\u6001\uff0c\u53ef\u4ee5\u8fdb\u884c\u4e0b\u4e00\u6b65\u4e86\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get pods --all-namespaces\nNAMESPACE     
NAME                                 READY   STATUS    RESTARTS   AGE\nkube-system   calico-node-nzqzl                    2\/2     Running   0          13m\nkube-system   coredns-fb8b8dccf-twknr              1\/1     Running   0          95m\nkube-system   coredns-fb8b8dccf-wc4pd              1\/1     Running   0          95m\nkube-system   etcd-k8s-master                      1\/1     Running   0          94m\nkube-system   kube-apiserver-k8s-master            1\/1     Running   0          94m\nkube-system   kube-controller-manager-k8s-master   1\/1     Running   0          94m\nkube-system   kube-proxy-b298g                     1\/1     Running   0          95m\nkube-system   kube-scheduler-k8s-master            1\/1     Running   0          94m<\/code><\/pre>\n<h3>worker\u8282\u70b9\u52a0\u5165\uff08\u672c\u8282\u5728worker\u8282\u70b9\u64cd\u4f5c\uff09<\/h3>\n<p>\u5728master\u8282\u70b9\u4e0a\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u6240\u6709\u8282\u70b9\uff0c\u53ea\u6709master<\/p>\n<pre><code>kubectl get nodes<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get nodes\nNAME         STATUS   ROLES    AGE     VERSION\nk8s-master   Ready    master   3m22s   v1.15.0<\/code><\/pre>\n<h3>\u5728worker\u8282\u70b9\u4e0a\u5c06Worker\u8282\u70b9\u52a0\u5165\u96c6\u7fa4<\/h3>\n<p>\u5728Worker\u8282\u70b9\u4e0a\u8fd0\u884c<code>Kubernetes Master<\/code>\u521d\u59cb\u5316\u65f6\u751f\u6210\u7684\u4ee4\u724c\uff1a<\/p>\n<pre><code>[root@k8s-node01 ~]# kubeadm join 192.168.0.8:6443 --token aakoqt.zq79agckuzc5wt29 \\\n    --discovery-token-ca-cert-hash sha256:fa781d59e6e69d1a8abb836f66e3d36fd2d3e1765a1afdb71b1a18af807585c2<\/code><\/pre>\n<pre><code>[root@k8s-node01 ~]# kubeadm join 192.168.0.8:6443 --token aakoqt.zq79agckuzc5wt29 \\\n>     --discovery-token-ca-cert-hash sha256:fa781d59e6e69d1a8abb836f66e3d36fd2d3e1765a1afdb71b1a18af807585c2\n[preflight] Running pre-flight checks\n    [WARNING IsDockerSystemdCheck]: detected &quot;cgroupfs&quot; as the Docker 
cgroup driver. The recommended driver is &quot;systemd&quot;. Please follow the guide at https:\/\/kubernetes.io\/docs\/setup\/cri\/\n[preflight] Reading configuration from the cluster...\n[preflight] FYI: You can look at this config file with &#039;kubectl -n kube-system get cm kubeadm-config -oyaml&#039;\n[kubelet-start] Downloading configuration for the kubelet from the &quot;kubelet-config-1.15&quot; ConfigMap in the kube-system namespace\n[kubelet-start] Writing kubelet configuration to file &quot;\/var\/lib\/kubelet\/config.yaml&quot;\n[kubelet-start] Writing kubelet environment file with flags to file &quot;\/var\/lib\/kubelet\/kubeadm-flags.env&quot;\n[kubelet-start] Activating the kubelet service\n[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...\n\nThis node has joined the cluster:\n* Certificate signing request was sent to apiserver and a response was received.\n* The Kubelet was informed of the new secure connection details.\n\nRun &#039;kubectl get nodes&#039; on the control-plane to see this node join the cluster.\n\n[root@k8s-node01 ~]# <\/code><\/pre>\n<h3>\u5728master\u8282\u70b9\u4e0a\u68c0\u67e5\u52a0\u5165\u7ed3\u679c<\/h3>\n<p>\u518d\u56de\u5230master\u8282\u70b9\u4e0a\u53bb\u67e5\u770b\u6240\u6709node\uff0c\u53d1\u73b0\u5c31\u591a\u4e86\u4e00\u4e2a\u8282\u70b9\uff1a<\/p>\n<pre><code>kubectl get nodes<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get nodes\nNAME         STATUS      ROLES    AGE     VERSION\nk8s-master   Ready       master   7h7m    v1.15.0\nk8s-node01   Ready       &lt;none&gt;   3m41s   v1.15.0\nk8s-node02   Ready       &lt;none&gt;   7h1m    v1.15.0<\/code><\/pre>\n<p>\u8865\u5145\uff1a\u5f53\u5728worker\u8282\u70b9\u4e0a\u521a\u521a\u6267\u884c\u5b8c\u52a0\u5165\u96c6\u7fa4\u7684\u4ee4\u724c\u4e4b\u540e\uff0c\u4e2d\u95f4\u6709\u51fa\u73b0ContainerCreating\u7684\u72b6\u6001\uff0c\u7b49\u51e0\u5206\u949f\u518d\u770b\uff0c\u5c31\u5df2\u7ecfOK\u4e86<\/p>\n<pre><code>[root@k8s-master ~]# kubectl 
get pods --all-namespaces\nNAMESPACE     NAME                                 READY   STATUS              RESTARTS   AGE\nkube-system   calico-node-d4ksg                    0\/2     ContainerCreating   0          3m13s\nkube-system   calico-node-l6gsm                    2\/2     Running             0          31m\nkube-system   calico-node-lrszl                    2\/2     Running             0          31m\nkube-system   coredns-5c98db65d4-5tmc4             1\/1     Running             0          113m\nkube-system   coredns-5c98db65d4-9srmt             1\/1     Running             0          113m\nkube-system   etcd-k8s-master                      1\/1     Running             0          112m\nkube-system   kube-apiserver-k8s-master            1\/1     Running             0          112m\nkube-system   kube-controller-manager-k8s-master   1\/1     Running             0          112m\nkube-system   kube-proxy-dkpmj                     1\/1     Running             0          113m\nkube-system   kube-proxy-gm9nr                     0\/1     ContainerCreating   0          3m13s\nkube-system   kube-proxy-vswzq                     0\/1     Running             0          3m13s\nkube-system   kube-scheduler-k8s-master            1\/1     Running             0          112m<\/code><\/pre>\n<p>\u7b49\u51e0\u5206\u949f\u67e5\u770bpod\u72b6\u6001\uff0c\u5c31OK\u4e86<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get pods --all-namespaces\nNAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE\nkube-system   calico-node-d4ksg                    2\/2     Running   0          4m27s\nkube-system   calico-node-l6gsm                    2\/2     Running   2          7h5m\nkube-system   calico-node-lrszl                    1\/2     Running   0          7h2m\nkube-system   coredns-5c98db65d4-5tmc4             1\/1     Running   1          7h7m\nkube-system   coredns-5c98db65d4-9srmt             1\/1     Running   1          7h7m\nkube-system   
etcd-k8s-master                      1\/1     Running   1          7h6m\nkube-system   kube-apiserver-k8s-master            1\/1     Running   1          7h7m\nkube-system   kube-controller-manager-k8s-master   1\/1     Running   1          7h7m\nkube-system   kube-proxy-dkpmj                     1\/1     Running   0          4m27s\nkube-system   kube-proxy-gm9nr                     1\/1     Running   0          7h2m\nkube-system   kube-proxy-vswzq                     1\/1     Running   1          7h7m\nkube-system   kube-scheduler-k8s-master            1\/1     Running   1          7h7m<\/code><\/pre>\n<p>\u518d\u67e5\u770b\u8282\u70b9\uff0cnode\u8282\u70b9\u5df2\u7ecf\u5c31\u662fready\u72b6\u6001\u4e86\uff1a<\/p>\n<pre><code>[root@k8s-master ~]# kubectl get nodes\nNAME         STATUS   ROLES    AGE     VERSION\nk8s-master   Ready    master   7h7m    v1.15.0\nk8s-node01   Ready    &lt;none&gt;   3m41s   v1.15.0\nk8s-node02   Ready    &lt;none&gt;   7h1m    v1.15.0<\/code><\/pre>\n<p>\u5982\u8282\u70b9\u4e00\u76f4\u5904\u4e8e<code>NotReady<\/code>\u72b6\u6001\uff0cpod\u4e00\u76f4\u5904\u4e8e<code>ContainerCreating<\/code>\u72b6\u6001\uff0c\u5219\u4f7f\u7528<code>kubectl describe pod<\/code>\u68c0\u67e5\uff0c\u5373\u53ef\u67e5\u8be2pod\u9519\u8bef\u4fe1\u606f<\/p>\n<p>\u9519\u8bef\u4fee\u590d\u5b8c\u6210\u540e\uff0c\u9700\u8981\u5728Node\u8282\u70b9\u91cd\u542fDocker<\/p>\n<pre><code>systemctl restart docker<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl describe pod calico-node-2kw7x --namespace=kube-system\nName:               calico-node-2kw7x\nNamespace:          kube-system\nPriority:           0\nPriorityClassName:  &lt;none&gt;\nNode:               k8s-node01\/192.168.0.10\nStart Time:         Tue, 11 Jun 2019 22:18:35 +0800\nLabels:             controller-revision-hash=7d4b8c9897\n                    k8s-app=calico-node\n                    pod-template-generation=1\nAnnotations:        scheduler.alpha.kubernetes.io\/critical-pod: \nStatus:          
   Pending\nIP:                 192.168.0.10\nControlled By:      DaemonSet\/calico-node\nContainers:\n  calico-node:\n    Container ID:   \n    Image:          calico\/node:v3.3.6\n    Image ID:       \n    Port:           &lt;none&gt;\n    Host Port:      &lt;none&gt;\n    State:          Waiting\n      Reason:       ContainerCreating\n    Ready:          False\n    Restart Count:  0\n    Requests:\n      cpu:      250m\n    Liveness:   http-get http:\/\/localhost:9099\/liveness delay=10s timeout=1s period=10s #success=1 #failure=6\n    Readiness:  exec [\/bin\/calico-node -bird-ready -felix-ready] delay=0s timeout=1s period=10s #success=1 #failure=3\n    Environment:\n      DATASTORE_TYPE:                     kubernetes\n      FELIX_TYPHAK8SSERVICENAME:          &lt;set to the key &#039;typha_service_name&#039; of config map &#039;calico-config&#039;&gt;  Optional: false\n      WAIT_FOR_DATASTORE:                 true\n      NODENAME:                            (v1:spec.nodeName)\n      CALICO_NETWORKING_BACKEND:          &lt;set to the key &#039;calico_backend&#039; of config map &#039;calico-config&#039;&gt;  Optional: false\n      CLUSTER_TYPE:                       k8s,bgp\n      IP:                                 autodetect\n      CALICO_IPV4POOL_IPIP:               Always\n      FELIX_IPINIPMTU:                    &lt;set to the key &#039;veth_mtu&#039; of config map &#039;calico-config&#039;&gt;  Optional: false\n      CALICO_IPV4POOL_CIDR:               192.168.0.0\/16\n      CALICO_DISABLE_FILE_LOGGING:        true\n      FELIX_DEFAULTENDPOINTTOHOSTACTION:  ACCEPT\n      FELIX_IPV6SUPPORT:                  false\n      FELIX_LOGSEVERITYSCREEN:            info\n      FELIX_HEALTHENABLED:                true\n    Mounts:\n      \/lib\/modules from lib-modules (ro)\n      \/run\/xtables.lock from xtables-lock (rw)\n      \/var\/lib\/calico from var-lib-calico (rw)\n      \/var\/run\/calico from var-run-calico (rw)\n      
\/var\/run\/secrets\/kubernetes.io\/serviceaccount from calico-node-token-8bjw5 (ro)\n  install-cni:\n    Container ID:  \n    Image:         calico\/cni:v3.3.6\n    Image ID:      \n    Port:          &lt;none&gt;\n    Host Port:     &lt;none&gt;\n    Command:\n      \/install-cni.sh\n    State:          Waiting\n      Reason:       ContainerCreating\n    Ready:          False\n    Restart Count:  0\n    Environment:\n      CNI_CONF_NAME:         10-calico.conflist\n      KUBERNETES_NODE_NAME:   (v1:spec.nodeName)\n      CNI_NETWORK_CONFIG:    &lt;set to the key &#039;cni_network_config&#039; of config map &#039;calico-config&#039;&gt;  Optional: false\n      CNI_MTU:               &lt;set to the key &#039;veth_mtu&#039; of config map &#039;calico-config&#039;&gt;            Optional: false\n    Mounts:\n      \/host\/etc\/cni\/net.d from cni-net-dir (rw)\n      \/host\/opt\/cni\/bin from cni-bin-dir (rw)\n      \/var\/run\/secrets\/kubernetes.io\/serviceaccount from calico-node-token-8bjw5 (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             False \n  ContainersReady   False \n  PodScheduled      True \nVolumes:\n  lib-modules:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/lib\/modules\n    HostPathType:  \n  var-run-calico:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/var\/run\/calico\n    HostPathType:  \n  var-lib-calico:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/var\/lib\/calico\n    HostPathType:  \n  xtables-lock:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/run\/xtables.lock\n    HostPathType:  FileOrCreate\n  cni-bin-dir:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/opt\/cni\/bin\n    HostPathType:  \n  cni-net-dir:\n    Type:          HostPath (bare host directory volume)\n    Path:          \/etc\/cni\/net.d\n    HostPathType:  \n  
calico-node-token-8bjw5:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  calico-node-token-8bjw5\n    Optional:    false\nQoS Class:       Burstable\nNode-Selectors:  beta.kubernetes.io\/os=linux\nTolerations:     :NoSchedule\n                 :NoExecute\n                 CriticalAddonsOnly\n                 node.kubernetes.io\/disk-pressure:NoSchedule\n                 node.kubernetes.io\/memory-pressure:NoSchedule\n                 node.kubernetes.io\/network-unavailable:NoSchedule\n                 node.kubernetes.io\/not-ready:NoExecute\n                 node.kubernetes.io\/pid-pressure:NoSchedule\n                 node.kubernetes.io\/unreachable:NoExecute\n                 node.kubernetes.io\/unschedulable:NoSchedule\nEvents:\n  Type     Reason                  Age                     From                 Message\n  ----     ------                  ----                    ----                 -------\n  Warning  FailedCreatePodSandBox  3m24s (x1445 over 11h)  kubelet, k8s-node01  Failed create pod sandbox: rpc error: code = Unknown desc = failed pulling image &quot;k8s.gcr.io\/pause:3.1&quot;: Error response from daemon: Get https:\/\/k8s.gcr.io\/v2\/: net\/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)\n[root@k8s-master ~]#<\/code><\/pre>\n<blockquote>\n<p>k8s\u7684pod\u9ed8\u8ba4\u4e0d\u4f1a\u8c03\u5ea6\u5230master\u8282\u70b9\uff0c\u5982\u679c\u90e8\u7f72\u7684\u662f\u5355\u8282\u70b9\u7684\u96c6\u7fa4\uff0c\u5c31\u9700\u8981\u6309\u7167\u4e0b\u9762\u7684\u65b9\u5f0f\u4fee\u6539pod\u7684\u8c03\u5ea6\u7b56\u7565\uff08<a target=\"_blank\" rel=\"noopener\" href=\"https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network\">https:\/\/v1-12.docs.kubernetes.io\/docs\/setup\/independent\/create-cluster-kubeadm\/#pod-network<\/a>\uff09\uff1a<\/p>\n<\/blockquote>\n<p><a target=\"_blank\" rel=\"noopener\" 
href=\"https:\/\/s1.51cto.com\/images\/blog\/201904\/03\/7b2d05c0c6e877850e9e35990b6bccf7.png?x-oss-process=image\/watermark,size_16,text_QDUxQ1RP5Y2a5a6i,color_FFFFFF,t_100,g_se,x_10,y_10,shadow_90,type_ZmFuZ3poZW5naGVpdGk\">https:\/\/s1.51cto.com\/images\/blog\/201904\/03\/7b2d05c0c6e877850e9e35990b6bccf7.png?x-oss-process=image\/watermark,size_16,text_QDUxQ1RP5Y2a5a6i,color_FFFFFF,t_100,g_se,x_10,y_10,shadow_90,type_ZmFuZ3poZW5naGVpdGk<\/a>=<\/p>\n<h2>\u90e8\u7f72dashboard<\/h2>\n<p>\u5173\u4e8edashboard\u7684\u4ecb\u7ecd\u548c\u90e8\u7f72\u65b9\u5f0f\u53ef\u53c2\u8003\uff1a<a target=\"_blank\" rel=\"noopener\" href=\"https:\/\/kubernetes.io\/docs\/tasks\/access-application-cluster\/web-ui-dashboard\/#accessing-the-dashboard-ui\">https:\/\/kubernetes.io\/docs\/tasks\/access-application-cluster\/web-ui-dashboard\/#accessing-the-dashboard-ui<\/a><\/p>\n<p>\u8fd9\u91cc\u90e8\u7f72\u7684\u662fv1.10.1\u7248\u672c<\/p>\n<blockquote>\n<p>\u6ce8\u610f\uff1adashboard\u52a1\u5fc5\u90e8\u7f72\u5728master\u8282\u70b9<\/p>\n<\/blockquote>\n<h3>\u4e0b\u8f7d\u90e8\u7f72dashboard\u7684yaml\u6587\u4ef6\u5230\u672c\u5730\u5e76\u4fee\u6539\u62c9\u53d6\u955c\u50cf\u5730\u5740<\/h3>\n<p>\u7531\u4e8eyaml\u914d\u7f6e\u6587\u4ef6\u4e2d\u6307\u5b9a\u955c\u50cf\u4ecegoogle\u62c9\u53d6\uff0c\u5148\u4e0b\u8f7dyaml\u6587\u4ef6\u5230\u672c\u5730\uff0c\u4fee\u6539\u914d\u7f6e\u4ece\u963f\u91cc\u4e91\u4ed3\u5e93\u62c9\u53d6\u955c\u50cf\u3002<\/p>\n<p>\uff081\uff09https\u6a21\u5f0f<\/p>\n<p>\u6ce8\u610f\u5fc5\u987b\u63d0\u4f9b\u8bc1\u4e66\uff01<\/p>\n<pre><code>[root@k8smaster ~]# wget https:\/\/raw.githubusercontent.com\/kubernetes\/dashboard\/master\/aio\/deploy\/recommended\/kubernetes-dashboard.yaml<\/code><\/pre>\n<p>\uff082\uff09http\u6a21\u5f0f<\/p>\n<pre><code>[root@k8smaster ~]# wget https:\/\/raw.githubusercontent.com\/kubernetes\/dashboard\/master\/aio\/deploy\/alternative\/kubernetes-dashboard.yaml<\/code><\/pre>\n<p>\uff083\uff09v1.10.1\u7248\u672c<\/p>\n<pre><code># wget 
https:\/\/raw.githubusercontent.com\/kubernetes\/dashboard\/v1.10.1\/src\/deploy\/alternative\/kubernetes-dashboard.yaml<\/code><\/pre>\n<p>\u4fee\u6539114\u884c\u62c9\u53d6\u955c\u50cf\u5730\u5740\u4e3a\u963f\u91cc\u4e91\u7684\u5730\u5740\uff1a<\/p>\n<pre><code>image: registry.cn-hangzhou.aliyuncs.com\/google_containers\/kubernetes-dashboard-amd64:v1.10.1<\/code><\/pre>\n<pre><code>template:\n  metadata:\n    labels:\n      k8s-app: kubernetes-dashboard\n  spec:\n    containers:\n    - name: kubernetes-dashboard\n      #image: k8s.gcr.io\/kubernetes-dashboard-amd64:v2.0.0-alpha0\n      image: kubernetesdashboarddev\/kubernetes-dashboard-amd64:v2.0.0-alpha0\n      ports:\n      - containerPort: 9090\n        protocol: TCP<\/code><\/pre>\n<h3>\u90e8\u7f72dashboard<\/h3>\n<pre><code>kubectl create -f kubernetes-dashboard.yaml<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl create -f kubernetes-dashboard.yaml\nsecret\/kubernetes-dashboard-csrf created\nserviceaccount\/kubernetes-dashboard created\nrole.rbac.authorization.k8s.io\/kubernetes-dashboard-minimal created\nrolebinding.rbac.authorization.k8s.io\/kubernetes-dashboard-minimal created\ndeployment.apps\/kubernetes-dashboard created\nservice\/kubernetes-dashboard created<\/code><\/pre>\n<h3>\u67e5\u770bPod \u7684\u72b6\u6001\u4e3arunning\u8bf4\u660edashboard\u5df2\u7ecf\u90e8\u7f72\u6210\u529f<\/h3>\n<pre><code>kubectl get pods --all-namespaces\nkubectl get pod --namespace=kube-system -o wide | grep dashboard<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get pods --all-namespaces\nNAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE\nkube-system   calico-node-2kw7x                       1\/2     Running   0          12h\nkube-system   calico-node-nzqzl                       1\/2     Running   2          13h\nkube-system   coredns-fb8b8dccf-twknr                 1\/1     Running   1          14h\nkube-system   coredns-fb8b8dccf-wc4pd                 1\/1     
Running   1          14h\nkube-system   etcd-k8s-master                         1\/1     Running   1          14h\nkube-system   kube-apiserver-k8s-master               1\/1     Running   1          14h\nkube-system   kube-controller-manager-k8s-master      1\/1     Running   1          14h\nkube-system   kube-proxy-b298g                        1\/1     Running   1          14h\nkube-system   kube-proxy-srcdt                        1\/1     Running   0          12h\nkube-system   kube-scheduler-k8s-master               1\/1     Running   1          14h\nkube-system   kubernetes-dashboard-5d9599dc98-h4gpr   1\/1     Running   0          74s\n[root@k8s-master ~]# kubectl get pod --namespace=kube-system -o wide | grep dashboard\nkubernetes-dashboard-5d9599dc98-h4gpr   1\/1     Running   0          54s   172.16.0.6     k8s-master   &lt;none&gt;           &lt;none&gt;<\/code><\/pre>\n<p>\u540c\u65f6\uff0cDashboard\u4f1a\u5728<code>kube-system namespace<\/code>\u4e2d\u521b\u5efa\u81ea\u5df1\u7684<code>Deployment<\/code>\u548c<code>Service<\/code>\uff1a<\/p>\n<pre><code>kubectl get deployment kubernetes-dashboard --namespace=kube-system\nkubectl get service kubernetes-dashboard --namespace=kube-system<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get deployment kubernetes-dashboard --namespace=kube-system\nNAME                   READY   UP-TO-DATE   AVAILABLE   AGE\nkubernetes-dashboard   1\/1     1            1           2m26s\n[root@k8s-master ~]# kubectl get service kubernetes-dashboard --namespace=kube-system\nNAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE\nkubernetes-dashboard   ClusterIP   10.104.31.113   &lt;none&gt;        443\/TCP   
2m35s<\/code><\/pre>\n<h3>\u914d\u7f6e\u4f7f\u7528nodeport\u65b9\u5f0f\u8bbf\u95eedashboard<\/h3>\n<p>\u8bbf\u95eedashboard\u7684\u65b9\u5f0f\u6709\u5f88\u591a\uff0c\u8fd9\u91cc\u4f7f\u7528\u7684\u662f\u914d\u7f6enodeport\u7684\u65b9\u5f0f\u6765\u8bbf\u95ee\u3002<\/p>\n<h4>\u4fee\u6539\u914d\u7f6e\u6587\u4ef6<\/h4>\n<p>\u4fee\u6539\u6587\u4ef6<code>kubernetes-dashboard.yaml<\/code>\uff0c\u5c06<code>service type<\/code>\u548c<code>nodeport<\/code>\u6dfb\u52a0\u8fdb\u53bb\uff0c\u6ce8\u610fk8s\u53ea\u652f\u630130000\u4ee5\u4e0a\u7684\u7aef\u53e3<\/p>\n<pre><code>[root@k8s-master ~]# vim kubernetes-dashboard.yaml  #\u52a0\u5165\u4e0b\u9762\u4e24\u884c\u914d\u7f6e<\/code><\/pre>\n<pre><code># ------------------- Dashboard Service ------------------- #\n\nkind: Service\napiVersion: v1\nmetadata:\n  labels:\n    k8s-app: kubernetes-dashboard\n  name: kubernetes-dashboard\n  namespace: kube-system\nspec:\n  type: NodePort    #\u6dfb\u52a0Service\u7684type\u4e3aNodePort\n  ports:\n    - port: 80\n      targetPort: 9090\n      nodePort: 30006    # \u6dfb\u52a0\u6620\u5c04\u5230\u865a\u62df\u673a\u7684\u7aef\u53e3\uff0ck8s\u53ea\u652f\u630130000\u4ee5\u4e0a\u7684\u7aef\u53e3\uff0c\u7aef\u53e3\u81ea\u5b9a\u4e49\n  selector:\n    k8s-app: kubernetes-dashboard<\/code><\/pre>\n<h4>\u4fee\u6539\u540e\uff0c\u91cd\u65b0\u5e94\u7528\u914d\u7f6e\u6587\u4ef6<\/h4>\n<pre><code>kubectl apply -f kubernetes-dashboard.yaml<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl apply -f kubernetes-dashboard.yaml\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nsecret\/kubernetes-dashboard-certs configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nsecret\/kubernetes-dashboard-csrf configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nserviceaccount\/kubernetes-dashboard 
configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nrole.rbac.authorization.k8s.io\/kubernetes-dashboard-minimal configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nrolebinding.rbac.authorization.k8s.io\/kubernetes-dashboard-minimal configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\ndeployment.apps\/kubernetes-dashboard configured\nWarning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\nservice\/kubernetes-dashboard configured<\/code><\/pre>\n<h4>\u7aef\u53e3\u5df2\u7ecf\u53d8\u621030006<\/h4>\n<pre><code>kubectl get service -n kube-system | grep dashboard<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get service -n kube-system | grep dashboard\nkubernetes-dashboard   NodePort    10.104.31.113   &lt;none&gt;        443:30006\/TCP            9m20s<\/code><\/pre>\n<h4>\u83b7\u53d6\u767b\u5f55dashboard\u7684token<\/h4>\n<pre><code>kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name |grep namespace) | grep token<\/code><\/pre>\n<pre><code>[root@k8s-master ~]# kubectl get service -n kube-system | grep dashboard\nkubernetes-dashboard   NodePort    10.110.148.107   &lt;none&gt;        80:30006\/TCP             3m28s\n[root@k8s-master ~]# kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name |grep namespace) | grep token\nName:         namespace-controller-token-hd6z8\nType:  kubernetes.io\/service-account-token\ntoken:      
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlci10b2tlbi1oZDZ6OCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJuYW1lc3BhY2UtY29udHJvbGxlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImRiNzY4YTFhLTE5NmQtNDNmNC1hYjVhLWRjYWJiZTMzMmU4NyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTpuYW1lc3BhY2UtY29udHJvbGxlciJ9.iW30zjU4md6AVQFsI7AEpIpkQRe9tWw8EH5oilYo4l7CZIQ24CaWe4OvqrssyLDttP4aCqv3-DE3YPbLo8bZIsp73cKiBytfFNBHVPiO0S4eR-IQFvJNfxwTj_xDK4ksBbxS0BDVIOU6TyYWDCbXuKXn35szPwcSDylE3H2FE_L8ZnvIMQHGi-gtTSNYDmyILqgSfYpb7tA-rGcbcvdsA44BC0xCH7ELFRFFKezi_rTq4JoW5HB5Z9SDSstU18h7UuPj8NXTS1j1IFtL8Xt1CEZrKf7yEZEKFdNtvwK7t1vDTMQDn8Bhi7mN2qd3WRf9QXRTN73sTfPJlWPdzK_LxA<\/code><\/pre>\n<h4>\u4f7f\u7528nodeport\u65b9\u5f0f\u8bbf\u95eedashboard<\/h4>\n<p>\u8bbf\u95ee\u5730\u5740\uff1a<a target=\"_blank\" rel=\"noopener\" href=\"http:\/\/nodeportIP:nodeport\">http:\/\/nodeportIP:nodeport<\/a><br \/>\n\u767b\u5f55\u5730\u5740\uff1a<a target=\"_blank\" rel=\"noopener\" href=\"http:\/\/119.29.172.118:30006\/#!\/login\">http:\/\/119.29.172.118:30006\/#!\/login<\/a><\/p>\n","protected":false},"excerpt":{"rendered":"<p>Kubernetes\u96c6\u7fa4\u90e8\u7f72\u65b9\u5f0f Minikube\u65b9\u5f0f Minikube\u662f\u4e00\u4e2a\u5de5\u5177\uff0c\u53ef\u4ee5\u5728\u672c\u5730\u5feb\u901f\u8fd0\u884c\u4e00\u4e2a\u5355\u70b9 
[&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[10],"tags":[356],"class_list":["post-1438","post","type-post","status-publish","format-standard","hentry","category-k8s","tag-kubernetes"],"_links":{"self":[{"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/posts\/1438","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/comments?post=1438"}],"version-history":[{"count":0,"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/posts\/1438\/revisions"}],"wp:attachment":[{"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/media?parent=1438"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/categories?post=1438"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.appblog.cn\/index.php\/wp-json\/wp\/v2\/tags?post=1438"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}