k8s_archive

Ubuntu 24.04 docker install

sudo vim /etc/apt/sources.list.d/ubuntu.sources
Types: deb
URIs: http://mirrors.ustc.edu.cn/ubuntu/
Suites: noble noble-updates noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg


Types: deb
URIs: http://mirrors.tuna.tsinghua.edu.cn/ubuntu/
Suites: noble noble-updates noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg


Types: deb
URIs: http://mirrors.aliyun.com/ubuntu/
Suites: noble noble-updates noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg


Types: deb
URIs: http://mirrors.163.com/ubuntu/
Suites: noble noble-updates noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do apt-get remove $pkg; done

apt-get update
apt-get install ca-certificates curl gnupg


install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg

echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null


apt-get update
apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

reference: docker-ce | Mirror Usage Help | Tsinghua Open Source Mirror

docker without root

sudo cat /etc/group | grep docker

sudo groupadd docker
sudo gpasswd -a ${USER} docker
sudo chmod a+rw /var/run/docker.sock
sudo systemctl restart docker
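
The group change only takes effect after logging out and back in; to activate it in the current shell and verify the daemon is reachable without sudo:

newgrp docker
docker run --rm hello-world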

docker mirror

sudo tee /etc/docker/daemon.json <<EOF
{
"registry-mirrors": [
"https://docker.xuanyuan.me"
]
}
EOF
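
Restart Docker so the mirror configuration takes effect, then confirm it was picked up:

sudo systemctl daemon-reload
sudo systemctl restart docker
docker info | grep -A 3 "Registry Mirrors"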

reference 1

https://linuxmirrors.cn/docker.sh

reference 2

List of popular domestic Docker Hub mirror accelerators

Address | Operator | Type | Notes
https://docker.xuanyuan.me | Xuanyuan Mirror (free tier) | Cloudflare + domestic CDN | site supports image search, simple configuration, member support group, filters illegal content, run by a registered domestic company (very stable)
https://xuanyuan.cloud | Xuanyuan Mirror (pro) | domestic CDN | requires login, stable speed, supports iKuai, Synology, ZSPACE and QNAP NAS, image search, simple configuration, member support group, filters illegal content, run by a registered domestic company (very stable)
https://mirror.ccs.tencentyun.com | Tencent Cloud | domestic CDN | recommended only from inside Tencent Cloud servers
https://xxx.mirror.aliyuncs.com | Alibaba Cloud | domestic CDN | recommended only from inside Alibaba Cloud servers (https://help.aliyun.com/zh/acr/user-guide/accelerate-the-pulls-of-docker-official-images?spm=a2c4g.11186623.0.i7
https://1ms.run/ | 1ms Mirror | Cloudflare + domestic CDN | free, online technical support, long-term maintenance, complete documentation, active community

cri-dockerd

Releases · Mirantis/cri-dockerd

If you do not want to use Docker, the alternative CRIs are containerd (unix:///run/containerd/containerd.sock) and CRI-O.

cat > /etc/systemd/system/cri-dockerd.service <<-EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-dockerd.socket

[Service]
Type=notify
ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause \
    --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin \
    --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock \
    --docker-endpoint=unix:///var/run/docker.sock \
    --cri-dockerd-root-directory=/var/lib/dockershim
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF



cat > /etc/systemd/system/cri-dockerd.socket <<-EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-dockerd.service

[Socket]
ListenStream=/var/run/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

# Pull the pause image from the Aliyun registry

sudo docker pull registry.aliyuncs.com/google_containers/pause:3.10

# Re-tag it as the registry.k8s.io image

sudo docker tag registry.aliyuncs.com/google_containers/pause:3.10 registry.k8s.io/pause:3.10

# Verify the image

sudo docker images | grep pause

If on Arch Linux with pacman:

yay -S cri-dockerd-bin

systemctl daemon-reload
systemctl enable cri-dockerd.service
systemctl restart cri-dockerd.service
ls  /var/run | grep docker
sudo systemctl enable --now docker

sudo systemctl enable --now cri-dockerd

sudo systemctl enable --now kubelet

multiple nodes

# run each on its own machine
sudo hostnamectl set-hostname "k8smaster"
sudo hostnamectl set-hostname "k8snode1"
sudo hostnamectl set-hostname "k8snode2"

# run on every machine
cat >> /etc/hosts << EOF
192.168.31.224 k8smaster
192.168.31.225 k8snode1
192.168.31.226 k8snode2
EOF

timedatectl set-timezone Asia/Shanghai

sudo apt install -y ntpsec-ntpdate
ntpdate ntp.aliyun.com

crontab -e
0 0 * * * ntpdate ntp.aliyun.com

Kernel packet forwarding and bridge filtering

cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF


modprobe overlay
modprobe br_netfilter


cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF


sysctl --system

sudo swapoff -a
sudo vim /etc/fstab
Comment out the swap line, e.g. /dev/sda2 none swap sw 0 0
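
A non-interactive alternative (a sketch; it comments out every fstab line containing a swap entry, so double-check /etc/fstab afterwards):

sudo sed -i.bak '/\sswap\s/ s/^/#/' /etc/fstab
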
apt install -y ipset ipvsadm

ipvs stands for IP Virtual Server. It's a software-based load balancer that allows multiple network services to be presented to clients as a single virtual IP address.

Key functions of ipvs:

  • Load balancing: Distributes incoming client requests across multiple servers, improving performance and availability.
  • Failover: Automatically redirects traffic to a backup server if the primary server becomes unavailable.
  • Persistence: Maintains client connections with a specific server, improving user experience for applications that require stateful connections.
  • Health checks: Monitors the health of backend servers and prevents traffic from being sent to unhealthy servers.

ipvs is commonly used in:

  • Web servers: Distributing web traffic to multiple web servers.
  • Application servers: Balancing requests to application servers.
  • Databases: Load balancing database connections.
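
For kube-proxy to actually run in ipvs mode, the ipvs kernel modules also need to be loaded; a minimal sketch (module names assume a recent kernel where nf_conntrack replaced nf_conntrack_ipv4):

cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack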

k8s keyring

curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

# echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.tuna.tsinghua.edu.cn/kubernetes/core:/stable:/v1.33/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list


apt update
apt-cache policy kubeadm
apt-cache showpkg kubeadm
apt-cache madison kubeadm
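
Then install the three components; a specific version shown by apt-cache madison can be pinned with the usual =<version> suffix:

apt install -y kubelet kubeadm kubectl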

Prevent automatic upgrades

apt-mark hold kubelet kubeadm kubectl

Edit the kubelet defaults file: from version 1.30 on it is /etc/default/kubelet; before that it was /etc/sysconfig/kubelet.

vim /etc/default/kubelet

KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
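
kubeadm expects the kubelet and the container runtime to use the same cgroup driver. On cgroup v2 hosts Docker already defaults to systemd, but it can be set explicitly; a sketch that merges the setting into the daemon.json written earlier:

sudo tee /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://docker.xuanyuan.me"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
sudo systemctl restart docker
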
systemctl enable kubelet

cidr

Plan the pod and service CIDRs. There is only one rule: the three ranges must not overlap or intersect.

  • Host network: already planned above, i.e. 192.168.31.0/24
  • Service CIDR: 10.96.0.0/12
  • Pod CIDR: 10.244.0.0/16
# run on the master node
kubeadm init --kubernetes-version=1.33.3 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --image-repository=registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock --upload-certs --v=9

Parameter notes:

  • kubernetes-version: the Kubernetes version to install
  • control-plane-endpoint: a name for the cluster's control plane; any value works
  • apiserver-advertise-address: the address of the cluster's master node
  • pod-network-cidr: the pod CIDR; it only has to avoid overlapping the host and service ranges
  • service-cidr: the service CIDR; it only has to avoid overlapping the host and pod ranges
  • image-repository: use a domestic image registry
  • cri-socket: selects the container runtime; omit this flag when using containerd
  • v: log verbosity; 9 produces very detailed output
To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.217.140:6443 --token xxxxxxxxxxxxxxxxx \
--discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
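
The token above expires after 24 hours by default; a fresh join command can be printed on the master at any time:

kubeadm token create --print-join-command
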
# run on the remaining worker nodes; --cri-socket selects the container runtime and can be omitted when using containerd
kubeadm join k8smaster:6443 --token xxxxxxxxxxxxxxxxx \
--discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx \
--cri-socket=unix:///var/run/cri-dockerd.sock
kubectl get nodes

calico

Tutorial: Install Calico on single-host k8s cluster | Calico Documentation

kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/tigera-operator.yaml
wget  https://raw.githubusercontent.com/projectcalico/calico/v3.30.2/manifests/custom-resources.yaml

Edit custom-resources.yaml and change the pod CIDR to 10.244.0.0/16, wait for the previous create to finish, then:
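
For example, assuming the file still contains Calico's default 192.168.0.0/16 pool:

sed -i 's|192.168.0.0/16|10.244.0.0/16|' custom-resources.yaml
grep cidr custom-resources.yaml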

kubectl create -f custom-resources.yaml
watch kubectl get pod -n calico-system
kubectl describe pod calico-node-pdf78  -n calico-system
kubectl get cs

The cluster currently has only one master node, and by default Kubernetes does not schedule pods onto the control-plane node. This restriction can be lifted:

# inspect the existing taints first
kubectl describe node ray-vubuntu
kubectl taint nodes --all node-role.kubernetes.io/control-plane-


# add a taint
kubectl taint nodes <node-name> key1=value1:NoSchedule
# remove a taint
kubectl taint nodes <node-name> key1:NoSchedule-

reset operation

# kubeadm reset
sudo kubeadm reset --force --cri-socket=unix:///var/run/cri-dockerd.sock

sudo rm -rf /root/.kube
sudo rm -rf /etc/cni/net.d
sudo rm -rf /etc/kubernetes/*

sudo systemctl restart docker
sudo systemctl restart kubelet

metallb

Documentation
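
Add the chart repository first (assuming the upstream MetalLB Helm repo):

helm repo add metallb https://metallb.github.io/metallb
helm repo update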

helm install metallb metallb/metallb -n metallb-system --create-namespace
kubectl apply -f metallb/metallb-l2-pool.yaml
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: production
  namespace: metallb-system
spec:
  # Production services will go here. Public IPs are expensive.
  addresses:
  - 10.69.220.50-10.69.220.70

---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2network
  namespace: metallb-system
spec:
  ipAddressPools:
  - production
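
To verify that the pool and advertisement were accepted:

kubectl get ipaddresspools,l2advertisements -n metallb-system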

k8s gateway

GitHub: ori-edge/k8s_gateway

helm repo add k8s_gateway https://ori-edge.github.io/k8s_gateway/
helm install exdns --set domain=pop.dev.cn k8s_gateway/k8s-gateway -n gateway --create-namespace
Edit DNS in /etc/systemd/resolved.conf to add the gateway service's external IP.
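
A sketch of that change (the IP is a placeholder for the EXTERNAL-IP that MetalLB assigned to the gateway service):

kubectl get svc -n gateway
sudo sed -i 's/^#\?DNS=.*/DNS=10.69.220.50/' /etc/systemd/resolved.conf
sudo systemctl restart systemd-resolved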

cert manager

# kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.18.2/cert-manager.yaml

kubectl apply -f cert-manager/cert-manager.yaml

Generate a certificate or certificate chain with openssl, then configure it as the ca-key-pair secret:
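
A minimal self-signed CA sketch matching the file names used below (a real setup may use a root plus intermediate chain instead):

mkdir -p tls/certs tls/keys
openssl genrsa -out tls/keys/ks-interm.key.pem 4096
openssl req -x509 -new -sha256 -days 3650 -key tls/keys/ks-interm.key.pem -subj "/CN=dev.lg.com CA" -out tls/certs/ks-interm.pem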

kubectl create secret tls ca-key-pair --cert=tls/certs/ks-interm.pem --key=tls/keys/ks-interm.key.pem -n cert-manager
kubectl apply -f cert-manager/ca-cluster-issuer.yaml

Check the certificate details: openssl x509 -in <(kubectl get secret simpleweb-tls -n default -o jsonpath='{.data.tls\.crt}' | base64 -d) -text -noout

openebs

helm repo add openebs https://openebs.github.io/openebs
helm repo update
helm install openebs --namespace openebs openebs/openebs --set engines.replicated.mayastor.enabled=false  --create-namespace
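
To let PVCs bind without naming a StorageClass explicitly, the local hostpath class (assumed here to be named openebs-hostpath; confirm with kubectl get sc) can be marked as the default:

kubectl get sc
kubectl patch storageclass openebs-hostpath -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'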

If scheduling problems appear on a single-node cluster, the replica count can be set to 1:

kubectl -n openebs get sts openebs-loki -o yaml | grep -nA20 affinity

kubectl -n openebs scale sts openebs-loki --replicas=1

prometheus

helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm repo update

# helm show values prometheus-community/kube-prometheus-stack > kube-prometheus-stack-values.yaml
helm install prometheus prometheus-community/kube-prometheus-stack --namespace prometheus --create-namespace

Workaround 1 for network problems:

# git clone https://github.com/prometheus-community/helm-charts.git

helm dependency build charts/kube-prometheus-stack/

helm install prometheus ./charts/kube-prometheus-stack --namespace prometheus --create-namespace

port-forwardings

Prometheus-UI

kubectl port-forward service/prometheus-kube-prometheus-prometheus 9090

Alert Manager UI

kubectl port-forward svc/prometheus-kube-prometheus-alertmanager 9093

Grafana

kubectl port-forward deployment/prometheus-grafana 3000

Grafana Dashboard credentials

user: admin, pwd: prom-operator (the chart's default from values.yaml)

The password can also be read from the secret:

kubectl --namespace prometheus get secrets prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo

redis-exporter

kubectl port-forward service/redis-exporter-prometheus-redis-exporter 9121

exporter

For example, testing the redis exporter:

helm install redis-exporter prometheus-community/prometheus-redis-exporter -f prometheus-redis_values.yaml

Testing a single redis exporter that monitors multiple redis nodes at once:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-master-1
  labels:
    app: redis
    lg-scrape-redis: default
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: redis
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379

---
apiVersion: v1
kind: Service
metadata:
  name: redis-master-1
  labels:
    app: redis
    role: master
    tier: backend
    lg-scrape-redis: default
spec:
  ports:
  - port: 6379
    targetPort: 6379
    nodePort: 30001
  type: NodePort
  selector:
    app: redis
    role: master
    tier: backend

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-master-2
  labels:
    app: redis
    lg-scrape-redis: default
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: redis
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379

---
apiVersion: v1
kind: Service
metadata:
  name: redis-master-2
  labels:
    app: redis
    role: master
    tier: backend
    lg-scrape-redis: default
spec:
  ports:
  - port: 6379
    targetPort: 6379
    nodePort: 30002
  type: NodePort
  selector:
    app: redis
    role: master
    tier: backend

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-master-3
  labels:
    app: redis
spec:
  selector:
    matchLabels:
      app: redis
      role: master
      tier: backend
  replicas: 1
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: redis
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379

---
apiVersion: v1
kind: Service
metadata:
  name: redis-master-3
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
  - port: 6379
    targetPort: 6379
    nodePort: 30003
  type: NodePort
  selector:
    app: redis
    role: master
    tier: backend

Exporter deployment:

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis-exporter
  name: redis-exporter
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis-exporter
  template:
    metadata:
      labels:
        app: redis-exporter
    spec:
      containers:
      - name: redis-exporter
        image: oliver006/redis_exporter:latest
        imagePullPolicy: IfNotPresent
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 9121

---
apiVersion: v1
kind: Service
metadata:
  name: redis-exporter
  labels:
    app: redis-exporter
spec:
  ports:
  - name: redis-exporter
    port: 9121
    targetPort: 9121
  selector:
    app: redis-exporter

Kubernetes service-discovery scrape configuration:

apiVersion: monitoring.coreos.com/v1alpha1
kind: ScrapeConfig
metadata:
  name: k8s-sd-config
  namespace: default
  labels:
    release: prometheus
spec:
  jobName: 'redis_exporter_k8s_sd'
  scrapeInterval: 10s
  kubernetesSDConfigs:
  - role: endpoints
  relabelings:
  - sourceLabels: [__meta_kubernetes_service_label_lg_scrape_redis]
    action: keep
    regex: default
  - targetLabel: __metrics_path__
    replacement: /scrape
  - targetLabel: __param_target
    sourceLabels: [__address__]
  - targetLabel: __address__
    replacement: redis-exporter.default.svc.cluster.local:9121
  - action: labelmap
    regex: __meta_kubernetes_service_label_(.+)
  - sourceLabels: [__meta_kubernetes_namespace]
    action: replace
    targetLabel: kubernetes_namespace
  - sourceLabels: [__meta_kubernetes_service_name]
    action: replace
    targetLabel: kubernetes_name
  - sourceLabels: [__meta_kubernetes_pod_name]
    action: replace
    targetLabel: kubernetes_pod_name
  - sourceLabels: [__meta_kubernetes_pod_name]
    action: replace
    targetLabel: instance
    regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_pod_name]
  #   action: keep
  #   regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_pod_container_name]
  #   action: keep
  #   regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_pod_phase]
  #   action: keep
  #   regex: Running
  # - sourceLabels: [__meta_kubernetes_endpoints_name]
  #   action: keep
  #   regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_service_label_app_kubernetes_io_name]
  #   action: keep
  #   regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_service_name]
  #   action: keep
  #   regex: (.*redis.*)
  # - sourceLabels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
  #   action: keep
  #   regex: (.*redis.*)

Query a specific target via http://exporter_addr:9121/scrape?target=redis_addr:6379 to inspect the scraped result.
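
For example, through a local port-forward to the exporter Service defined above:

kubectl port-forward svc/redis-exporter 9121 &
curl 'http://localhost:9121/scrape?target=redis-master-1.default.svc.cluster.local:6379'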

k8s with private repository

docker-secret

apiVersion: v1
kind: Secret
metadata:
  name: my-registry-key
data:
  .dockerconfigjson: base64-encoded-contents-of-.docker/config.json-file
type: kubernetes.io/dockerconfigjson

deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  labels:
    app: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      imagePullSecrets:
      - name: my-registry-key
      containers:
      - name: my-app
        image: private-repo/my-app:1.3
        imagePullPolicy: Always
        ports:
        - containerPort: 3000

print full docker login command for aws ecr

aws ecr get-login

log in to the private docker registry

docker login -u username -p password

generated file

cat .docker/config.json
cat .docker/config.json | base64

create docker login secret from config.json file

kubectl create secret generic my-registry-key \
    --from-file=.dockerconfigjson=.docker/config.json \
    --type=kubernetes.io/dockerconfigjson

kubectl create secret generic my-registry-key --from-file=.dockerconfigjson=.docker/config.json --type=kubernetes.io/dockerconfigjson

kubectl get secret

create docker login secret with login credentials

kubectl create secret docker-registry my-registry-key \
    --docker-server=https://private-repo \
    --docker-username=user \
    --docker-password=pwd

kubectl create secret docker-registry my-registry-key --docker-server=https://private-repo --docker-username=user --docker-password=pwd

access minikube console

minikube ssh

copy config.json file from Minikube to my host

scp -i $(minikube ssh-key) docker@$(minikube ip):.docker/config.json .docker/config.json

ingress nginx

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.13.0/deploy/static/provider/cloud/deploy.yaml
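
Check that the controller is running and received an external IP (assigned by MetalLB in this setup):

kubectl get pods -n ingress-nginx
kubectl get svc -n ingress-nginx ingress-nginx-controller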

Test configuration

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: simple-web
  namespace: default
  labels:
    app: simple-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: simple-web
  template:
    metadata:
      labels:
        app: simple-web
    spec:
      containers:
      - name: httpd
        image: httpd:latest
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "50m"
          limits:
            memory: "128Mi"
            cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: simple-web-service
  namespace: default
  labels:
    app: simple-web
spec:
  selector:
    app: simple-web
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
  type: ClusterIP
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: simpleweb-tls
  namespace: default
spec:
  secretName: simpleweb-tls
  issuerRef:
    name: ca-issuer
    kind: ClusterIssuer
  commonName: simpleweb.dev.lg.com
  dnsNames:
  - simpleweb.dev.lg.com
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: simpleweb-ingress
  namespace: default
  annotations:
    cert-manager.io/cluster-issuer: ca-issuer
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - simpleweb.dev.lg.com
    secretName: simpleweb-tls
  rules:
  - host: simpleweb.dev.lg.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: simple-web-service
            port:
              number: 80
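
A quick end-to-end check without touching DNS (the IP is a placeholder for the ingress controller's external IP; -k skips verification if the CA is not trusted locally):

curl -k --resolve simpleweb.dev.lg.com:443:10.69.220.51 https://simpleweb.dev.lg.com/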
