Calico Setup and Testing
Node Preparation
Hostname     | IP           | OS              | Role   | docker0 subnet
k8s-master   | 10.30.56.108 | CentOS 7.3.1611 | master | -
k8s-node001  | 10.88.200.31 | CentOS 7.3.1611 | node   | 172.27.100.0/24
k8s-node002  | 10.88.200.32 | CentOS 7.3.1611 | node   | 172.28.100.1/24
Master environment deployment
# Installed packages
[root@k8s-master ~]# rpm -qa | grep kube
kubernetes-client-1.8.4-1.el7.centos.x86_64
kubernetes-master-1.8.4-1.el7.centos.x86_64
etcd-3.2.7-1.el7.x86_64

# API server configuration
[root@k8s-master ~]# grep -Ev '^$|^#' /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd_servers=http://10.30.56.108:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=" --cors_allowed_origins=.* --service-node-port-range='8000-32767' --runtime-config=extensions/v1beta1 "

[root@k8s-master ~]# grep -Ev '^$|^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://127.0.0.1:8080"

[root@k8s-master ~]# grep -Ev '^$|^#' /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 \
    --service-cluster-ip-range=10.254.0.0/16 \
    --cluster-name=kubernetes \
    --leader-elect=true \
    --node-monitor-grace-period=40s \
    --node-monitor-period=5s \
    --pod-eviction-timeout=50ms"

[root@k8s-master ~]# grep -Ev '^$|^#' /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--leader-elect=true"

[root@k8s-master ~]# grep -Ev '^$|^#' /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--proxy-mode=iptables"

[root@k8s-master ~]# grep -Ev '^$|^#' /etc/etcd/etcd.conf
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://10.30.56.108:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://10.30.56.108:2379"
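Once the services listed further below have been started, a quick sanity check against the endpoints configured above is worthwhile. A minimal sketch (commands only, the expected results were not captured from this cluster):

# The API server health endpoint on the insecure 8080 port should return "ok"
curl http://10.30.56.108:8080/healthz

# etcd 3.2 defaults to the v2 etcdctl API, so cluster-health works against the advertised client URL
etcdctl --endpoints=http://10.30.56.108:2379 cluster-health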
Node environment deployment
# Installed packages
[root@k8s-node001 ~]# rpm -qa | grep docker
docker-1.12.6-61.git85d7426.el7.centos.x86_64
docker-client-1.12.6-61.git85d7426.el7.centos.x86_64
docker-common-1.12.6-61.git85d7426.el7.centos.x86_64
[root@k8s-node001 ~]# rpm -qa | grep kube
kubernetes-node-1.8.4-1.el7.centos.x86_64
kubernetes-client-1.8.4-1.el7.centos.x86_64

# Kubernetes configuration on the node
[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes
kubernetes/       kubernetes.bak/
[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/
config  controller-manager  kubelet  kubelet.kubeconfig  proxy

[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://10.30.56.108:8080"

[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 \
    --service-cluster-ip-range=10.254.0.0/16 \
    --cluster-name=kubernetes \
    --leader-elect=true \
    --node-monitor-grace-period=40s \
    --node-monitor-period=5s \
    --pod-eviction-timeout=50ms"

[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_PORT="--port=10250"
KUBELET_HOSTNAME=""
KUBELET_API_SERVER="--kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
KUBELET_ARGS=" --cgroup-driver=systemd --docker-root=/home/docker --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"

[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/kubelet.kubeconfig
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: http://10.30.56.108:8080/
  name: local
contexts:
- context:
    cluster: local
  name: local
current-context: local

[root@k8s-node001 ~]# grep -Ev '^$|^#' /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--proxy-mode=iptables"

# Docker configuration
[root@k8s-node001 kubernetes]# cat /etc/sysconfig/docker
# /etc/sysconfig/docker
OPTIONS=" --exec-opt native.cgroupdriver=cgroupfs --graph=/home/docker --default-ulimit nproc=65535:65535 --default-ulimit nofile=655350:655350 --storage-opt dm.basesize=40G --storage-opt dm.fs=ext4 --storage-driver=devicemapper --storage-opt dm.mountopt=nodiscard --storage-opt dm.blkdiscard=false --bip=172.27.100.1/24"
INSECURE_REGISTRY=" --insecure-registry docker"

# bird routing daemon packages and configuration
[root@k8s-node001 kubernetes]# rpm -qa | grep bird
bird-1.4.5-2.el7.x86_64
bird6-1.4.5-2.el7.x86_64
[root@k8s-node001 kubernetes]# cat /etc/bird.conf
log syslog all;

router id 10.88.200.31;

# Define a route filter...
filter test_filter {
    if net = 172.27.100.0/24 then accept;   ### NOTE: this subnet must be in the same range as docker0 and the subnet this node uses in the Calico CNI config below
    else reject;
}

# Define another routing table
# table main;

protocol direct {
    interface "*";              # Restrict network interfaces it works with
}

protocol kernel {
    import all;
    learn;                      # Learn all alien routes from the kernel
#   persist;
    scan time 20;               # Scan kernel routing table every 20 seconds
#   import none;
    export all;                 # Default is export none
#   kernel table 254;
}

protocol device {
    scan time 10;               # Scan interfaces every 10 seconds
}

protocol static {
#   import all;
#   export all;
#   table main;
    route 1.1.1.1:255.255.255.255 via "eth0";
}

# Pipe protocol connects two routing tables... Beware of loops.
# protocol pipe {
#   peer table testable;
#   # Define what routes do we export to this protocol / import from it.
#   import all;
#   export all;
#   import none;
#   import filter test_filter;
#   import where source = RTS_DEVICE;
# }

protocol ospf MyOSPF {
#   tick 2;
    rfc1583compat yes;
    import filter test_filter;
    export filter test_filter;
    area 0.0.0.0 {
        stub no;
        interface "bond0" {
            hello 2;
            retransmit 6;
            cost 10;
            transmit delay 5;
            dead count 4;
            wait 50;
            type broadcast;
            authentication simple;
            password "momo1602";
        };
        interface "docker0" {
            hello 2;
            retransmit 6;
            cost 10;
            transmit delay 5;
            dead count 4;
            wait 50;
            type broadcast;
            authentication simple;
            password "momo1602";
        };
    };
}

# Install calicoctl
wget https://www.projectcalico.org/builds/calicoctl
chmod +x calicoctl
ln -s ./calicoctl /usr/bin/calicoctl

# Install the Calico CNI plugins
wget -N -P /opt/cni/bin https://github.com/projectcalico/cni-plugin/releases/download/v1.11.0/calico
wget -N -P /opt/cni/bin https://github.com/projectcalico/cni-plugin/releases/download/v1.11.0/calico-ipam
chmod +x /opt/cni/bin/calico /opt/cni/bin/calico-ipam

# Install the standard CNI loopback plugin
wget https://github.com/containernetworking/cni/releases/download/v0.3.0/cni-v0.3.0.tgz
tar -zxvf cni-v0.3.0.tgz
cp loopback /opt/cni/bin/

# CNI network configuration
[root@k8s-node001 net.d]# pwd
/etc/cni/net.d
[root@k8s-node001 net.d]# cat 10-calico.conf
{
    "name": "k8s-pod-network",
    "cniVersion": "0.3.0",
    "type": "calico",
    "etcd_endpoints": "http://10.30.56.108:2379",
    "mtu": 1500,
    "ipam": {
        "type": "host-local",
        "subnet": "172.27.100.0/27"   ### NOTE: this is the range this node can allocate to containers; it must stay consistent with the docker0 subnet and the net accepted by the bird filter
    },
    "policy": {
        "type": "k8s",
        "k8s_api_root": "http://10.30.56.108:8080"
    },
    "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
    }
}
[root@k8s-node001 net.d]# cat calico-kubeconfig
# Kubeconfig file for Calico CNI plugin.
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
users:
- name: calico
contexts:
- name: calico-context
  context:
    cluster: local
    user: calico
current-context: calico-context

[root@k8s-node001 bin]# pwd
/opt/cni/bin
[root@k8s-node001 bin]# ll
total 68168
-rwxr-xr-x 1 root root 29062464 Dec  7 19:09 calico
-rwxr-xr-x 1 root root 28424000 Dec  7 19:09 calico-ipam
-rwxr-xr-x 1 root root  2814104 Dec  7 19:09 flannel
-rwxr-xr-x 1 root root  2991965 Dec  7 19:09 host-local
-rwxr-xr-x 1 root root  3026388 Dec  7 19:09 loopback
-rwxr-xr-x 1 root root  3470464 Dec  7 19:09 portmap

# calico-node systemd unit
[root@k8s-node001 ~]# cat /etc/systemd/system/calico-node.service
[Unit]
Description=calico node
After=docker.service
Requires=docker.service

[Service]
User=root
Environment=ETCD_ENDPOINTS=http://10.30.56.108:2379
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \
    -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \
    -e IP= \
    -e AS= \
    -e CALICO_LIBNETWORK_ENABLED=true \
    -e NO_DEFAULT_POOLS=true \
    -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
    -e CALICO_LIBNETWORK_CREATE_PROFILES=false \
    -e IP6= \
    -v /var/run/calico:/var/run/calico \
    -v /lib/modules:/lib/modules \
    -v /run/docker/plugins:/run/docker/plugins \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /var/log/calico:/var/log/calico \
    calico/node:v1.0.2
ExecStop=/usr/bin/docker rm -f calico-node
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
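Once the bird and calico-node services from the next section are running, it is worth checking that OSPF comes up and that the calico-node container defined by the unit above is actually alive. A minimal sketch (birdc is the BIRD 1.x control client; MyOSPF and the filter name come from the bird.conf above):

# OSPF protocol state and the routes it has accepted/exported
birdc show protocols all MyOSPF
birdc show route                 # only nets accepted by test_filter (172.27.100.0/24 here) should be exported

# The systemd unit wraps a docker container, so it should also be visible to docker
docker ps --filter name=calico-node
calicoctl node status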
Services to start on the master and nodes
master
for i in {kube-controller-manager,kube-scheduler,kube-apiserver,etcd};do systemctl start ${i} && systemctl enable ${i} && systemctl status ${i}; done

[root@k8s-master ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      19222/etcd
tcp        0      0 10.30.56.108:22         0.0.0.0:*               LISTEN      11748/sshd
tcp        0      0 10.30.56.108:10050      0.0.0.0:*               LISTEN      22174/zabbix_agentd
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      19222/etcd
tcp        0      0 10.30.56.108:2379       0.0.0.0:*               LISTEN      19222/etcd
tcp6       0      0 :::10252                :::*                    LISTEN      27251/kube-controll
tcp6       0      0 :::8080                 :::*                    LISTEN      27714/kube-apiserve
tcp6       0      0 :::6443                 :::*                    LISTEN      27714/kube-apiserve
tcp6       0      0 :::10251                :::*                    LISTEN      27252/kube-schedule
udp        0      0 0.0.0.0:68              0.0.0.0:*                           715/dhclient
udp        0      0 10.30.56.108:123        0.0.0.0:*                           10048/ntpd
udp        0      0 127.0.0.1:123           0.0.0.0:*                           10048/ntpd
udp        0      0 127.0.0.1:514           0.0.0.0:*                           2716/rsyslogd
udp        0      0 0.0.0.0:22237           0.0.0.0:*                           715/dhclient
udp6       0      0 :::64756                :::*                                715/dhclient
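With the ports above listening, the control plane can also be checked through the API server itself. A minimal sketch (output not captured from this cluster):

# scheduler, controller-manager and etcd-0 should all report Healthy
kubectl get componentstatuses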
node
for i in {bird,kubelet,calico-node};do systemctl start ${i} && systemctl enable ${i} && systemctl status ${i}; done

[root@k8s-node001 ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      97080/kubelet
tcp        0      0 10.88.200.31:22         0.0.0.0:*               LISTEN      56104/sshd
tcp        0      0 10.88.200.31:10050      0.0.0.0:*               LISTEN      87871/zabbix_agentd
tcp6       0      0 :::10250                :::*                    LISTEN      97080/kubelet
tcp6       0      0 :::10255                :::*                    LISTEN      97080/kubelet
tcp6       0      0 :::4194                 :::*                    LISTEN      97080/kubelet
udp        0      0 10.88.200.31:123        0.0.0.0:*                           67091/ntpd
udp        0      0 127.0.0.1:123           0.0.0.0:*                           67091/ntpd
udp        0      0 127.0.0.1:514           0.0.0.0:*                           2090/rsyslogd
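A couple of quick checks on the node confirm that docker picked up the --bip from /etc/sysconfig/docker and that the kubelet can find the CNI plugins and config installed earlier (a minimal sketch; paths follow the configuration above):

ip addr show docker0                                    # should carry 172.27.100.1/24 on k8s-node001
ls /opt/cni/bin /etc/cni/net.d                          # calico, calico-ipam, loopback and 10-calico.conf must be present
journalctl -u kubelet --no-pager | grep -i cni | tail   # look for CNI-related errors in the kubelet log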
Status check
[root@k8s-master ~]# kubectl get nodes
NAME          STATUS    ROLES     AGE       VERSION
k8s-node001   Ready     <none>    9d        v1.8.4
k8s-node002   Ready     <none>    9d        v1.8.4

[root@k8s-node001 ~]# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 10.88.200.32 | node-to-node mesh | up    | 04:33:22 | Established |
+--------------+-------------------+-------+----------+-------------+

[root@k8s-node002 ~]# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 10.88.200.31 | node-to-node mesh | up    | 04:33:22 | Established |
+--------------+-------------------+-------+----------+-------------+
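Since the per-node pod subnets are exchanged by bird/OSPF in this setup, the host routing table is the place to confirm cross-node reachability before testing pods. A minimal sketch (the exact via address, device, and proto shown in the comment are assumptions based on the node IPs and bird.conf above, not captured output):

# On k8s-node001: the pod segment of k8s-node002 should have been learned via OSPF
ip route | grep 172.28.100
# e.g. 172.28.100.0/24 via 10.88.200.32 dev bond0 proto bird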
Starting containers from the master to test network connectivity
[root@k8s-node001 ~]
NAME
[root@k8s-node001 ~]
NAME

[root@k8s-master ~]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP              NODE
busybox-1   1/1       Running   15         1d        172.27.100.18   k8s-node001
busybox-2   1/1       Running   15         1d        172.28.100.17   k8s-node002
[root@k8s-master ~]# kubectl get pods -n secondary -o wide
NAME                     READY     STATUS    RESTARTS   AGE       IP              NODE
nginx-5cfcf7c4b6-jj4qq   1/1       Running   0          1d        172.28.100.18   k8s-node002
web-57d67545cb-42qdf     1/1       Running   0          1d        172.27.100.19   k8s-node001

[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.18
PING 172.27.100.18 (172.27.100.18): 56 data bytes
64 bytes from 172.27.100.18: seq=0 ttl=64 time=0.084 ms
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.17
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.19
^C

### Conclusion: each pod can only ping its own address; all other pods are unreachable.

[root@k8s-node001 ~]
apiVersion: v1
kind: profile
metadata:
  name: k8s_ns.default
  labels:
    calico/k8s_ns: default
spec:
  ingress:
  - action: allow
  egress:
  - action: allow
[root@k8s-node001 ~]
Successfully created 1 'profile' resource(s)
[root@k8s-node001 ~]
NAME
k8s_ns.default

[root@k8s-master ~]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP              NODE
busybox-1   1/1       Running   16         1d        172.27.100.18   k8s-node001
busybox-2   1/1       Running   16         1d        172.28.100.17   k8s-node002
[root@k8s-master ~]# kubectl get pods -o wide -n secondary
NAME                     READY     STATUS    RESTARTS   AGE       IP              NODE
nginx-5cfcf7c4b6-jj4qq   1/1       Running   0          1d        172.28.100.18   k8s-node002
web-57d67545cb-42qdf     1/1       Running   0          1d        172.27.100.19   k8s-node001

[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.17
PING 172.28.100.17 (172.28.100.17): 56 data bytes
64 bytes from 172.28.100.17: seq=0 ttl=62 time=0.287 ms
64 bytes from 172.28.100.17: seq=1 ttl=62 time=0.231 ms
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.19
^C

### Conclusion: only pods in the default namespace can be reached.

[root@k8s-node001 ~]
apiVersion: v1
kind: profile
metadata:
  name: k8s_ns.secondary
  labels:
    calico/k8s_ns: secondary
spec:
  ingress:
  - action: allow
  egress:
  - action: allow
[root@k8s-node001 ~]
Successfully created 1 'profile' resource(s)
[root@k8s-node001 ~]
NAME
k8s_ns.default
k8s_ns.secondary

[root@k8s-master ~]# kubectl get pods -o wide -n secondary
NAME                     READY     STATUS    RESTARTS   AGE       IP              NODE
nginx-5cfcf7c4b6-jj4qq   1/1       Running   0          1d        172.28.100.18   k8s-node002
web-57d67545cb-42qdf     1/1       Running   0          1d        172.27.100.19   k8s-node001
[root@k8s-master ~]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP              NODE
busybox-1   1/1       Running   16         1d        172.27.100.18   k8s-node001
busybox-2   1/1       Running   16         1d        172.28.100.17   k8s-node002

[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.17
PING 172.28.100.17 (172.28.100.17): 56 data bytes
64 bytes from 172.28.100.17: seq=0 ttl=62 time=0.314 ms
64 bytes from 172.28.100.17: seq=1 ttl=62 time=0.259 ms
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.18
PING 172.28.100.18 (172.28.100.18): 56 data bytes
64 bytes from 172.28.100.18: seq=0 ttl=62 time=0.250 ms
64 bytes from 172.28.100.18: seq=1 ttl=62 time=0.200 ms
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.19
PING 172.27.100.19 (172.27.100.19): 56 data bytes
64 bytes from 172.27.100.19: seq=0 ttl=63 time=0.134 ms
64 bytes from 172.27.100.19: seq=1 ttl=63 time=0.101 ms
^C

### Conclusion: with both namespaces fully opened up, all pods can reach each other.

### CIDR-based rules (restricting access to secondary where the destination address falls in 172.28.100.0/27)

[root@k8s-master ~]# kubectl get pods -o wide -n secondary
NAME                     READY     STATUS    RESTARTS   AGE       IP              NODE
nginx-5cfcf7c4b6-jj4qq   1/1       Running   0          1d        172.28.100.18   k8s-node002
web-57d67545cb-42qdf     1/1       Running   0          1d        172.27.100.19   k8s-node001
[root@k8s-master ~]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP              NODE
busybox-1   1/1       Running   16         1d        172.27.100.18   k8s-node001
busybox-2   1/1       Running   16         1d        172.28.100.17   k8s-node002

[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.19
PING 172.27.100.19 (172.27.100.19): 56 data bytes
64 bytes from 172.27.100.19: seq=0 ttl=63 time=0.178 ms
64 bytes from 172.27.100.19: seq=1 ttl=63 time=0.129 ms
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-2 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-2 ping 172.27.100.19
PING 172.27.100.19 (172.27.100.19): 56 data bytes
64 bytes from 172.27.100.19: seq=0 ttl=62 time=0.285 ms
64 bytes from 172.27.100.19: seq=1 ttl=62 time=0.208 ms

[root@k8s-node001 ~]
Successfully replaced 1 'profile' resource(s)
[root@k8s-node001 ~]
apiVersion: v1
kind: profile
metadata:
  name: k8s_ns.secondary
  labels:
    calico/k8s_ns: secondary
spec:
  ingress:
  - action: deny
    source:
      net: 172.27.100.0/27
  - action: allow
    destination:
      net: 172.27.100.0/27
  egress:
  - action: allow

[root@k8s-master ~]# kubectl get pods -o wide -n secondary
NAME                     READY     STATUS    RESTARTS   AGE       IP              NODE
nginx-5cfcf7c4b6-jj4qq   1/1       Running   0          1d        172.28.100.18   k8s-node002
web-57d67545cb-42qdf     1/1       Running   0          1d        172.27.100.19   k8s-node001
[root@k8s-master ~]# kubectl get pods -o wide
NAME        READY     STATUS    RESTARTS   AGE       IP              NODE
busybox-1   1/1       Running   16         1d        172.27.100.18   k8s-node001
busybox-2   1/1       Running   16         1d        172.28.100.17   k8s-node002

[root@k8s-master ~]# kubectl exec busybox-1 ping 172.27.100.19
^C
[root@k8s-master ~]# kubectl exec busybox-1 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-2 ping 172.28.100.18
^C
[root@k8s-master ~]# kubectl exec busybox-2 ping 172.27.100.19
PING 172.27.100.19 (172.27.100.19): 56 data bytes
64 bytes from 172.27.100.19: seq=0 ttl=62 time=0.281 ms
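The node001 prompts in the transcript above lost their commands to formatting; the profiles are normally written to a YAML file and fed to calicoctl. A minimal sketch of the likely workflow (the file names are hypothetical, and the verbs are the standard calicoctl v1.x resource commands rather than the exact commands recovered from the original transcript):

# Write the profile shown above to a file, then create it
calicoctl create -f k8s_ns.default.yaml
# List profiles to confirm it exists
calicoctl get profile
# After editing k8s_ns.secondary with the CIDR-based rules, replace it in place
calicoctl replace -f k8s_ns.secondary.yaml
# Inspect the resulting rules
calicoctl get profile k8s_ns.secondary -o yaml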
That concludes the basic connectivity tests. calicoctl can enforce isolation through both profiles and policies, and the usage is fairly flexible, so it deserves further study and testing. Feel free to log in to this cluster to debug and discuss together.
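As a follow-up to the namespace-profile tests above, the same calicoctl v1.x resource model also supports policies that select workloads by label rather than by namespace. A minimal sketch (the policy name, order value, and the role == 'web' label are hypothetical, not taken from this cluster):

apiVersion: v1
kind: policy
metadata:
  name: allow-web-ingress-80
spec:
  order: 100
  selector: role == 'web'
  ingress:
  - action: allow
    protocol: tcp
    destination:
      ports: [80]
  egress:
  - action: allow

Such a policy would be applied the same way as the profiles (calicoctl create -f), and policies are evaluated before profiles, so they are the natural place for finer-grained rules.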