1,环境初始化配置
1,#关闭现有防火墙firewalld
systemctl disable firewalld
systemctl stop firewalld
firewall-cmd --state
2,#selinux
setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
3,#时间同步
yum -y install ntpdate
crontab -e
0 */1 * * * /usr/sbin/ntpdate time1.aliyun.com
4,host主机名解析
cat >> /etc/hosts <<EOF
172.16.2.146 bj-k8s-master1
# TODO(整理): 此处原文被截断,请补充其余节点的 IP 与主机名(如 bj-k8s-node1、bj-k8s-node2)
EOF
5,#配置 ipvs 内核模块
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
6,#安装 containerd 后生成默认配置文件
containerd config default > /etc/containerd/config.toml
-----
SystemdCgroup = false 改为 SystemdCgroup = true
# sandbox_image = "k8s.gcr.io/pause:3.6"
改为:
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
------
mkdir -p /etc/containerd/certs.d/docker.io/
cat > /etc/containerd/certs.d/docker.io/hosts.toml <<EOF
server = "https://docker.io"
[host."http://hub-mirror.c.163.com"]
capabilities = ["pull","resolve"]
[host."https://docker.mirrors.ustc.edu.cn"]
capabilities = ["pull","resolve"]
EOF
#镜像加速配置,后来增加
[root@bj-k8s-node1 ~]# vim /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
[root@bj-k8s-node2 docker.io]# cat > /etc/containerd/certs.d/docker.io/hosts.toml <<EOF
server = "https://docker.io"
[host."https://docker.mirrors.ustc.edu.cn"]
capabilities = ["pull","resolve"]
EOF
#修改后重启 containerd 生效:systemctl restart containerd
2,安装 k8s1.27
1,配置k8s V1.27 的yum 源
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2,查找版本
yum list kubelet --showduplicates | sort -r |grep 1.27
3,安装最新版本
yum install -y kubectl-1.27.1-0 kubelet-1.27.1-0 kubeadm-1.27.1-0
4,为了实现docker使用的cgroupdriver与kubelet使用的cgroup的一致性,建议修改如下文件内容。
cat > /etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF
5,配置开机启动
systemctl enable kubelet
6,初始化集群
[root@bj-k8s-master1 ~]# kubeadm init --kubernetes-version=v1.27.1 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=172.16.2.146 --image-repository registry.aliyuncs.com/google_containers
--apiserver-advertise-address 集群通告地址
--image-repository 由于默认拉取镜像地址k8s.gcr.io国内无法访问,这里指定阿里云镜像仓库地址
--kubernetes-version K8s版本,与上面安装的一致
--service-cidr 集群内部虚拟网络,Pod统一访问入口
--pod-network-cidr Pod网络,与下面部署的CNI网络组件yaml中保持一致
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.16.2.146:6443 --token ngv6ww.3nkcp0uydt7dp47a \
--discovery-token-ca-cert-hash sha256:0f1661c7c2484125021b2824ddeebc70385be3f426b4ed9f79b00ae78bdfd7c
3,安装网络插件
官网:https://docs.tigera.io/calico/latest/getting-started/kubernetes/quickstart#big-picture
1,安装 Tigera Calico 操作员和自定义资源定义。
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml
2,通过创建必要的自定义资源来安装 Calico。修改相关的子网清单
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml
3,查看日志
[root@bj-k8s-master1 home]# kubectl logs tigera-operator-5f4668786-ck9zn -n tigera-operator
4,确认所用pod在运行
[root@bj-k8s-master1 home]# watch kubectl get pods -n calico-system