# kilo-k3s-userspace-heterogeneous.yaml
#
# Kilo CNI for k3s on a heterogeneous cluster: nodes with kernel WireGuard
# run the plain kilo DaemonSet, nodes without it run kilo with a userspace
# boringtun sidecar; nkml labels each node accordingly.
  1. apiVersion: v1
  2. kind: ConfigMap
  3. metadata:
  4. name: kilo
  5. namespace: kube-system
  6. labels:
  7. app.kubernetes.io/name: kilo
  8. data:
  9. cni-conf.json: |
  10. {
  11. "cniVersion":"0.3.1",
  12. "name":"kilo",
  13. "plugins":[
  14. {
  15. "name":"kubernetes",
  16. "type":"bridge",
  17. "bridge":"kube-bridge",
  18. "isDefaultGateway":true,
  19. "forceAddress":true,
  20. "mtu": 1420,
  21. "ipam":{
  22. "type":"host-local"
  23. }
  24. },
  25. {
  26. "type":"portmap",
  27. "snat":true,
  28. "capabilities":{
  29. "portMappings":true
  30. }
  31. }
  32. ]
  33. }
  34. ---
  35. apiVersion: v1
  36. kind: ServiceAccount
  37. metadata:
  38. name: kilo
  39. namespace: kube-system
  40. ---
  41. apiVersion: rbac.authorization.k8s.io/v1
  42. kind: ClusterRole
  43. metadata:
  44. name: kilo
  45. rules:
  46. - apiGroups:
  47. - ""
  48. resources:
  49. - nodes
  50. verbs:
  51. - list
  52. - get
  53. - patch
  54. - watch
  55. - apiGroups:
  56. - kilo.squat.ai
  57. resources:
  58. - peers
  59. verbs:
  60. - list
  61. - update
  62. - watch
  63. - apiGroups:
  64. - apiextensions.k8s.io
  65. resources:
  66. - customresourcedefinitions
  67. verbs:
  68. - create
  69. ---
  70. apiVersion: rbac.authorization.k8s.io/v1
  71. kind: ClusterRoleBinding
  72. metadata:
  73. name: kilo
  74. roleRef:
  75. apiGroup: rbac.authorization.k8s.io
  76. kind: ClusterRole
  77. name: kilo
  78. subjects:
  79. - kind: ServiceAccount
  80. name: kilo
  81. namespace: kube-system
  82. ---
  83. apiVersion: apps/v1
  84. kind: DaemonSet
  85. metadata:
  86. name: kilo
  87. namespace: kube-system
  88. labels:
  89. app.kubernetes.io/name: kilo
  90. spec:
  91. selector:
  92. matchLabels:
  93. app.kubernetes.io/name: kilo
  94. template:
  95. metadata:
  96. labels:
  97. app.kubernetes.io/name: kilo
  98. spec:
  99. nodeSelector:
  100. nkml.squat.ai/wireguard: "true"
  101. serviceAccountName: kilo
  102. hostNetwork: true
  103. containers:
  104. - name: kilo
  105. image: squat/kilo
  106. args:
  107. - --kubeconfig=/etc/kubernetes/kubeconfig
  108. - --hostname=$(NODE_NAME)
  109. - --interface=kilo0
  110. env:
  111. - name: NODE_NAME
  112. valueFrom:
  113. fieldRef:
  114. fieldPath: spec.nodeName
  115. securityContext:
  116. privileged: true
  117. volumeMounts:
  118. - name: cni-conf-dir
  119. mountPath: /etc/cni/net.d
  120. - name: kilo-dir
  121. mountPath: /var/lib/kilo
  122. - name: kubeconfig
  123. mountPath: /etc/kubernetes/kubeconfig
  124. readOnly: true
  125. - name: lib-modules
  126. mountPath: /lib/modules
  127. readOnly: true
  128. - name: xtables-lock
  129. mountPath: /run/xtables.lock
  130. readOnly: false
  131. initContainers:
  132. - name: install-cni
  133. image: squat/kilo
  134. command:
  135. - /bin/sh
  136. - -c
  137. - set -e -x;
  138. cp /opt/cni/bin/* /host/opt/cni/bin/;
  139. TMP_CONF="$CNI_CONF_NAME".tmp;
  140. echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
  141. rm -f /host/etc/cni/net.d/*;
  142. mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
  143. env:
  144. - name: CNI_CONF_NAME
  145. value: 10-kilo.conflist
  146. - name: CNI_NETWORK_CONFIG
  147. valueFrom:
  148. configMapKeyRef:
  149. name: kilo
  150. key: cni-conf.json
  151. volumeMounts:
  152. - name: cni-bin-dir
  153. mountPath: /host/opt/cni/bin
  154. - name: cni-conf-dir
  155. mountPath: /host/etc/cni/net.d
  156. tolerations:
  157. - effect: NoSchedule
  158. operator: Exists
  159. - effect: NoExecute
  160. operator: Exists
  161. volumes:
  162. - name: cni-bin-dir
  163. hostPath:
  164. path: /opt/cni/bin
  165. - name: cni-conf-dir
  166. hostPath:
  167. path: /etc/cni/net.d
  168. - name: kilo-dir
  169. hostPath:
  170. path: /var/lib/kilo
  171. - name: kubeconfig
  172. hostPath:
  173. # Since kilo runs as a daemonset, it is recommended that you copy the
  174. # k3s.yaml kubeconfig file from the master node to all worker nodes
  175. # with the same path structure.
  176. path: /etc/rancher/k3s/k3s.yaml
  177. - name: lib-modules
  178. hostPath:
  179. path: /lib/modules
  180. - name: xtables-lock
  181. hostPath:
  182. path: /run/xtables.lock
  183. type: FileOrCreate
  184. ---
  185. apiVersion: apps/v1
  186. kind: DaemonSet
  187. metadata:
  188. name: kilo-userspace
  189. namespace: kube-system
  190. labels:
  191. app.kubernetes.io/name: kilo-userspace
  192. spec:
  193. selector:
  194. matchLabels:
  195. app.kubernetes.io/name: kilo-userspace
  196. template:
  197. metadata:
  198. labels:
  199. app.kubernetes.io/name: kilo-userspace
  200. spec:
  201. nodeSelector:
  202. nkml.squat.ai/wireguard: "false"
  203. serviceAccountName: kilo
  204. hostNetwork: true
  205. containers:
  206. - name: kilo
  207. image: squat/kilo
  208. args:
  209. - --kubeconfig=/etc/kubernetes/kubeconfig
  210. - --hostname=$(NODE_NAME)
  211. - --create-interface=false
  212. - --interface=kilo0
  213. env:
  214. - name: NODE_NAME
  215. valueFrom:
  216. fieldRef:
  217. fieldPath: spec.nodeName
  218. securityContext:
  219. privileged: true
  220. volumeMounts:
  221. - name: cni-conf-dir
  222. mountPath: /etc/cni/net.d
  223. - name: kilo-dir
  224. mountPath: /var/lib/kilo
  225. - name: kubeconfig
  226. mountPath: /etc/kubernetes/kubeconfig
  227. readOnly: true
  228. - name: lib-modules
  229. mountPath: /lib/modules
  230. readOnly: true
  231. - name: xtables-lock
  232. mountPath: /run/xtables.lock
  233. readOnly: false
  234. - name: wireguard
  235. mountPath: /var/run/wireguard
  236. readOnly: false
  237. - name: boringtun
  238. image: leonnicolas/boringtun
  239. args:
  240. - --disable-drop-privileges=true
  241. - --foreground
  242. - kilo0
  243. securityContext:
  244. privileged: true
  245. volumeMounts:
  246. - name: wireguard
  247. mountPath: /var/run/wireguard
  248. readOnly: false
  249. initContainers:
  250. - name: install-cni
  251. image: squat/kilo
  252. command:
  253. - /bin/sh
  254. - -c
  255. - set -e -x;
  256. cp /opt/cni/bin/* /host/opt/cni/bin/;
  257. TMP_CONF="$CNI_CONF_NAME".tmp;
  258. echo "$CNI_NETWORK_CONFIG" > $TMP_CONF;
  259. rm -f /host/etc/cni/net.d/*;
  260. mv $TMP_CONF /host/etc/cni/net.d/$CNI_CONF_NAME
  261. env:
  262. - name: CNI_CONF_NAME
  263. value: 10-kilo.conflist
  264. - name: CNI_NETWORK_CONFIG
  265. valueFrom:
  266. configMapKeyRef:
  267. name: kilo
  268. key: cni-conf.json
  269. volumeMounts:
  270. - name: cni-bin-dir
  271. mountPath: /host/opt/cni/bin
  272. - name: cni-conf-dir
  273. mountPath: /host/etc/cni/net.d
  274. tolerations:
  275. - effect: NoSchedule
  276. operator: Exists
  277. - effect: NoExecute
  278. operator: Exists
  279. volumes:
  280. - name: cni-bin-dir
  281. hostPath:
  282. path: /opt/cni/bin
  283. - name: cni-conf-dir
  284. hostPath:
  285. path: /etc/cni/net.d
  286. - name: kilo-dir
  287. hostPath:
  288. path: /var/lib/kilo
  289. - name: kubeconfig
  290. hostPath:
  291. # Since kilo runs as a daemonset, it is recommended that you copy the
  292. # k3s.yaml kubeconfig file from the master node to all worker nodes
  293. # with the same path structure.
  294. path: /etc/rancher/k3s/k3s.yaml
  295. - name: lib-modules
  296. hostPath:
  297. path: /lib/modules
  298. - name: xtables-lock
  299. hostPath:
  300. path: /run/xtables.lock
  301. type: FileOrCreate
  302. - name: wireguard
  303. hostPath:
  304. path: /var/run/wireguard
  305. ---
  306. kind: DaemonSet
  307. apiVersion: apps/v1
  308. metadata:
  309. name: nkml
  310. namespace: kube-system
  311. labels:
  312. app.kubernetes.io/name: nkml
  313. spec:
  314. selector:
  315. matchLabels:
  316. app.kubernetes.io/name: nkml
  317. template:
  318. metadata:
  319. labels:
  320. app.kubernetes.io/name: nkml
  321. spec:
  322. hostNetwork: true
  323. containers:
  324. - name: nkml
  325. image: leonnicolas/nkml
  326. args:
  327. - --hostname=$(NODE_NAME)
  328. - --label-mod=wireguard
  329. - --kubeconfig=/etc/kubernetes/kubeconfig
  330. env:
  331. - name: NODE_NAME
  332. valueFrom:
  333. fieldRef:
  334. fieldPath: spec.nodeName
  335. ports:
  336. - name: http
  337. containerPort: 8080
  338. volumeMounts:
  339. - name: kubeconfig
  340. mountPath: /etc/kubernetes/kubeconfig
  341. readOnly: true
  342. volumes:
  343. - name: kubeconfig
  344. hostPath:
  345. # since the above DaemonSets are dependant on the labels
  346. # and nkml would need a cni to start
  347. # it needs run on the hostnetwork and use the kubeconfig
  348. # to label the nodes
  349. path: /etc/rancher/k3s/k3s.yaml