mesh.go 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805
  1. // Copyright 2019 the Kilo authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. // +build linux
  15. package mesh
  16. import (
  17. "bytes"
  18. "fmt"
  19. "io/ioutil"
  20. "net"
  21. "os"
  22. "sync"
  23. "time"
  24. "github.com/go-kit/kit/log"
  25. "github.com/go-kit/kit/log/level"
  26. "github.com/prometheus/client_golang/prometheus"
  27. "github.com/vishvananda/netlink"
  28. "github.com/squat/kilo/pkg/encapsulation"
  29. "github.com/squat/kilo/pkg/iproute"
  30. "github.com/squat/kilo/pkg/iptables"
  31. "github.com/squat/kilo/pkg/route"
  32. "github.com/squat/kilo/pkg/wireguard"
  33. )
// Filesystem locations used by Kilo for persistent state.
const (
	// kiloPath is the directory where Kilo stores its configuration.
	kiloPath = "/var/lib/kilo"
	// privateKeyPath is the filepath where the WireGuard private key is stored.
	privateKeyPath = kiloPath + "/key"
	// confPath is the filepath where the WireGuard configuration is stored.
	confPath = kiloPath + "/conf"
)
// Mesh is able to create Kilo network meshes.
type Mesh struct {
	// Backend provides access to the node and peer stores.
	Backend
	// cleanUpIface indicates whether the Kilo interface should be removed on shutdown.
	cleanUpIface bool
	cni          bool
	cniPath      string
	enc          encapsulation.Encapsulator
	// externalIP is the node's publicly reachable IP.
	externalIP  *net.IPNet
	granularity Granularity
	hostname    string
	// internalIP is the node's private IP, if any.
	internalIP *net.IPNet
	ipTables   *iptables.Controller
	// kiloIface is the interface index of the Kilo (WireGuard) device.
	kiloIface int
	key       []byte
	local     bool
	port      uint32
	// priv and pub are the node's WireGuard key pair.
	priv      []byte
	privIface int
	pub       []byte
	// resyncPeriod is how often the full topology is re-applied.
	resyncPeriod time.Duration
	stop         chan struct{}
	subnet       *net.IPNet
	table        *route.Table
	// wireGuardIP is the IP assigned to the Kilo interface when this node leads.
	wireGuardIP *net.IPNet
	// nodes and peers are mutable fields in the struct
	// and need to be guarded.
	nodes map[string]*Node
	peers map[string]*Peer
	mu    sync.Mutex
	// Prometheus metrics. NOTE(review): "Guage" is a misspelling of "Gauge"
	// kept as-is because siblings in this package reference these names.
	errorCounter     *prometheus.CounterVec
	leaderGuage      prometheus.Gauge
	nodesGuage       prometheus.Gauge
	peersGuage       prometheus.Gauge
	reconcileCounter prometheus.Counter
	logger           log.Logger
}
  78. // New returns a new Mesh instance.
  79. func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, resyncPeriod time.Duration, logger log.Logger) (*Mesh, error) {
  80. if err := os.MkdirAll(kiloPath, 0700); err != nil {
  81. return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
  82. }
  83. private, err := ioutil.ReadFile(privateKeyPath)
  84. private = bytes.Trim(private, "\n")
  85. if err != nil {
  86. level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
  87. if private, err = wireguard.GenKey(); err != nil {
  88. return nil, err
  89. }
  90. }
  91. public, err := wireguard.PubKey(private)
  92. if err != nil {
  93. return nil, err
  94. }
  95. if err := ioutil.WriteFile(privateKeyPath, private, 0600); err != nil {
  96. return nil, fmt.Errorf("failed to write private key to disk: %v", err)
  97. }
  98. cniIndex, err := cniDeviceIndex()
  99. if err != nil {
  100. return nil, fmt.Errorf("failed to query netlink for CNI device: %v", err)
  101. }
  102. var kiloIface int
  103. if createIface {
  104. kiloIface, _, err = wireguard.New(iface)
  105. if err != nil {
  106. return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
  107. }
  108. } else {
  109. link, err := netlink.LinkByName(iface)
  110. if err != nil {
  111. return nil, fmt.Errorf("failed to get interface index: %v", err)
  112. }
  113. kiloIface = link.Attrs().Index
  114. }
  115. privateIP, publicIP, err := getIP(hostname, kiloIface, enc.Index(), cniIndex)
  116. if err != nil {
  117. return nil, fmt.Errorf("failed to find public IP: %v", err)
  118. }
  119. var privIface int
  120. if privateIP != nil {
  121. ifaces, err := interfacesForIP(privateIP)
  122. if err != nil {
  123. return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
  124. }
  125. privIface = ifaces[0].Index
  126. if enc.Strategy() != encapsulation.Never {
  127. if err := enc.Init(privIface); err != nil {
  128. return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
  129. }
  130. }
  131. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
  132. } else {
  133. enc = encapsulation.Noop(enc.Strategy())
  134. level.Debug(logger).Log("msg", "running without a private IP address")
  135. }
  136. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
  137. ipTables, err := iptables.New(iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
  138. if err != nil {
  139. return nil, fmt.Errorf("failed to IP tables controller: %v", err)
  140. }
  141. return &Mesh{
  142. Backend: backend,
  143. cleanUpIface: cleanUpIface,
  144. cni: cni,
  145. cniPath: cniPath,
  146. enc: enc,
  147. externalIP: publicIP,
  148. granularity: granularity,
  149. hostname: hostname,
  150. internalIP: privateIP,
  151. ipTables: ipTables,
  152. kiloIface: kiloIface,
  153. nodes: make(map[string]*Node),
  154. peers: make(map[string]*Peer),
  155. port: port,
  156. priv: private,
  157. privIface: privIface,
  158. pub: public,
  159. resyncPeriod: resyncPeriod,
  160. local: local,
  161. stop: make(chan struct{}),
  162. subnet: subnet,
  163. table: route.NewTable(),
  164. errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
  165. Name: "kilo_errors_total",
  166. Help: "Number of errors that occurred while administering the mesh.",
  167. }, []string{"event"}),
  168. leaderGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  169. Name: "kilo_leader",
  170. Help: "Leadership status of the node.",
  171. }),
  172. nodesGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  173. Name: "kilo_nodes",
  174. Help: "Number of nodes in the mesh.",
  175. }),
  176. peersGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  177. Name: "kilo_peers",
  178. Help: "Number of peers in the mesh.",
  179. }),
  180. reconcileCounter: prometheus.NewCounter(prometheus.CounterOpts{
  181. Name: "kilo_reconciles_total",
  182. Help: "Number of reconciliation attempts.",
  183. }),
  184. logger: logger,
  185. }, nil
  186. }
// Run starts the mesh.
// It initializes the node and peer backends, starts the IP tables and
// route table controllers, and then loops over node/peer events,
// periodic check-ins, and full resyncs until Stop is called.
func (m *Mesh) Run() error {
	if err := m.Nodes().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize node backend: %v", err)
	}
	// Try to set the CNI config quickly.
	if m.cni {
		if n, err := m.Nodes().Get(m.hostname); err == nil {
			m.nodes[m.hostname] = n
			m.updateCNIConfig()
		} else {
			// Non-fatal: the CNI config is retried on every resync below.
			level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
		}
	}
	if err := m.Peers().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize peer backend: %v", err)
	}
	ipTablesErrors, err := m.ipTables.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for IP tables updates: %v", err)
	}
	routeErrors, err := m.table.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for route table updates: %v", err)
	}
	// Drain asynchronous errors from the controllers in the background;
	// the goroutine exits when the stop channel is closed.
	go func() {
		for {
			var err error
			select {
			case err = <-ipTablesErrors:
			case err = <-routeErrors:
			case <-m.stop:
				return
			}
			if err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("run").Inc()
			}
		}
	}()
	defer m.cleanUp()
	// Timers are reset after each tick rather than using tickers so that a
	// slow handler does not queue up ticks.
	resync := time.NewTimer(m.resyncPeriod)
	checkIn := time.NewTimer(checkInPeriod)
	nw := m.Nodes().Watch()
	pw := m.Peers().Watch()
	var ne *NodeEvent
	var pe *PeerEvent
	for {
		select {
		case ne = <-nw:
			m.syncNodes(ne)
		case pe = <-pw:
			m.syncPeers(pe)
		case <-checkIn.C:
			m.checkIn()
			checkIn.Reset(checkInPeriod)
		case <-resync.C:
			if m.cni {
				m.updateCNIConfig()
			}
			m.applyTopology()
			resync.Reset(m.resyncPeriod)
		case <-m.stop:
			return nil
		}
	}
}
  254. func (m *Mesh) syncNodes(e *NodeEvent) {
  255. logger := log.With(m.logger, "event", e.Type)
  256. level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
  257. if isSelf(m.hostname, e.Node) {
  258. level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
  259. m.handleLocal(e.Node)
  260. return
  261. }
  262. var diff bool
  263. m.mu.Lock()
  264. if !e.Node.Ready() {
  265. // Trace non ready nodes with their presence in the mesh.
  266. _, ok := m.nodes[e.Node.Name]
  267. level.Debug(logger).Log("msg", "received non ready node", "node", e.Node, "in-mesh", ok)
  268. }
  269. switch e.Type {
  270. case AddEvent:
  271. fallthrough
  272. case UpdateEvent:
  273. if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) {
  274. diff = true
  275. }
  276. // Even if the nodes are the same,
  277. // overwrite the old node to update the timestamp.
  278. m.nodes[e.Node.Name] = e.Node
  279. case DeleteEvent:
  280. delete(m.nodes, e.Node.Name)
  281. diff = true
  282. }
  283. m.mu.Unlock()
  284. if diff {
  285. level.Info(logger).Log("node", e.Node)
  286. m.applyTopology()
  287. }
  288. }
  289. func (m *Mesh) syncPeers(e *PeerEvent) {
  290. logger := log.With(m.logger, "event", e.Type)
  291. level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
  292. var diff bool
  293. m.mu.Lock()
  294. // Peers are indexed by public key.
  295. key := string(e.Peer.PublicKey)
  296. if !e.Peer.Ready() {
  297. // Trace non ready peer with their presence in the mesh.
  298. _, ok := m.peers[key]
  299. level.Debug(logger).Log("msg", "received non ready peer", "peer", e.Peer, "in-mesh", ok)
  300. }
  301. switch e.Type {
  302. case AddEvent:
  303. fallthrough
  304. case UpdateEvent:
  305. if e.Old != nil && key != string(e.Old.PublicKey) {
  306. delete(m.peers, string(e.Old.PublicKey))
  307. diff = true
  308. }
  309. if !peersAreEqual(m.peers[key], e.Peer) {
  310. m.peers[key] = e.Peer
  311. diff = true
  312. }
  313. case DeleteEvent:
  314. delete(m.peers, key)
  315. diff = true
  316. }
  317. m.mu.Unlock()
  318. if diff {
  319. level.Info(logger).Log("peer", e.Peer)
  320. m.applyTopology()
  321. }
  322. }
  323. // checkIn will try to update the local node's LastSeen timestamp
  324. // in the backend.
  325. func (m *Mesh) checkIn() {
  326. m.mu.Lock()
  327. defer m.mu.Unlock()
  328. n := m.nodes[m.hostname]
  329. if n == nil {
  330. level.Debug(m.logger).Log("msg", "no local node found in backend")
  331. return
  332. }
  333. oldTime := n.LastSeen
  334. n.LastSeen = time.Now().Unix()
  335. if err := m.Nodes().Set(m.hostname, n); err != nil {
  336. level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
  337. m.errorCounter.WithLabelValues("checkin").Inc()
  338. // Revert time.
  339. n.LastSeen = oldTime
  340. return
  341. }
  342. level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
  343. }
// handleLocal reconciles an event about this node itself: it computes the
// expected local node from the mesh's own state, pushes it to the backend
// if the backend's copy differs, and re-applies the topology if the
// in-memory copy differs.
func (m *Mesh) handleLocal(n *Node) {
	// Allow the IPs to be overridden.
	if n.Endpoint == nil || (n.Endpoint.DNS == "" && n.Endpoint.IP == nil) {
		n.Endpoint = &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: m.externalIP.IP}, Port: m.port}
	}
	if n.InternalIP == nil && !n.NoInternalIP {
		n.InternalIP = m.internalIP
	}
	// Compare the given node to the calculated local node.
	// Take leader, location, and subnet from the argument, as these
	// are not determined by kilo.
	local := &Node{
		Endpoint:            n.Endpoint,
		Key:                 m.pub,
		NoInternalIP:        n.NoInternalIP,
		InternalIP:          n.InternalIP,
		LastSeen:            time.Now().Unix(),
		Leader:              n.Leader,
		Location:            n.Location,
		Name:                m.hostname,
		PersistentKeepalive: n.PersistentKeepalive,
		Subnet:              n.Subnet,
		WireGuardIP:         m.wireGuardIP,
		DiscoveredEndpoints: n.DiscoveredEndpoints,
		AllowedLocationIPs:  n.AllowedLocationIPs,
		Granularity:         m.granularity,
	}
	if !nodesAreEqual(n, local) {
		level.Debug(m.logger).Log("msg", "local node differs from backend")
		if err := m.Nodes().Set(m.hostname, local); err != nil {
			level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
			m.errorCounter.WithLabelValues("local").Inc()
			return
		}
		level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
	}
	// Compare against the copy currently stored in the mesh; only apply the
	// topology when the stored copy is out of date.
	m.mu.Lock()
	n = m.nodes[m.hostname]
	if n == nil {
		n = &Node{}
	}
	m.mu.Unlock()
	if !nodesAreEqual(n, local) {
		m.mu.Lock()
		m.nodes[local.Name] = local
		m.mu.Unlock()
		m.applyTopology()
	}
}
// applyTopology computes the desired mesh topology from the current nodes
// and peers and applies it to the system: the WireGuard configuration,
// IP tables rules, interface addresses, and routes.
// Errors are logged and counted rather than returned; the periodic resync
// in Run retries the whole operation.
func (m *Mesh) applyTopology() {
	m.reconcileCounter.Inc()
	m.mu.Lock()
	defer m.mu.Unlock()
	// If we can't resolve an endpoint, then fail and retry later.
	if err := m.resolveEndpoints(); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Ensure only ready nodes are considered.
	nodes := make(map[string]*Node)
	var readyNodes float64
	for k := range m.nodes {
		m.nodes[k].Granularity = m.granularity
		if !m.nodes[k].Ready() {
			continue
		}
		// Make it point to the node without copy.
		nodes[k] = m.nodes[k]
		readyNodes++
	}
	// Ensure only ready nodes are considered.
	peers := make(map[string]*Peer)
	var readyPeers float64
	for k := range m.peers {
		if !m.peers[k].Ready() {
			continue
		}
		// Make it point the peer without copy.
		peers[k] = m.peers[k]
		readyPeers++
	}
	m.nodesGuage.Set(readyNodes)
	m.peersGuage.Set(readyPeers)
	// We cannot do anything with the topology until the local node is available.
	if nodes[m.hostname] == nil {
		return
	}
	// Find the Kilo interface name.
	link, err := linkByIndex(m.kiloIface)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Find the old configuration.
	oldConfDump, err := wireguard.ShowDump(link.Attrs().Name)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	oldConf, err := wireguard.ParseDump(oldConfDump)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Record the endpoints WireGuard discovered for NATed counterparts so
	// that they can roam.
	natEndpoints := discoverNATEndpoints(nodes, peers, oldConf, m.logger)
	nodes[m.hostname].DiscoveredEndpoints = natEndpoints
	t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive, m.logger)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Update the node's WireGuard IP.
	if t.leader {
		m.wireGuardIP = t.wireGuardCIDR
	} else {
		m.wireGuardIP = nil
	}
	conf := t.Conf()
	buf, err := conf.Bytes()
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	if err := ioutil.WriteFile(confPath, buf, 0600); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	ipRules := t.Rules(m.cni)
	// If we are handling local routes, ensure the local
	// tunnel has an IP address and IPIP traffic is allowed.
	if m.enc.Strategy() != encapsulation.Never && m.local {
		var cidrs []*net.IPNet
		for _, s := range t.segments {
			// If the location prefix is not logicalLocation, but nodeLocation,
			// we don't need to set any extra rules for encapsulation anyways
			// because traffic will go over WireGuard.
			if s.location == logicalLocationPrefix+nodes[m.hostname].Location {
				for i := range s.privateIPs {
					cidrs = append(cidrs, oneAddressCIDR(s.privateIPs[i]))
				}
				break
			}
		}
		ipRules = append(ipRules, m.enc.Rules(cidrs)...)
		// If we are handling local routes, ensure the local
		// tunnel has an IP address.
		if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	if err := m.ipTables.Set(ipRules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	if t.leader {
		m.leaderGuage.Set(1)
		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
		// Setting the WireGuard configuration interrupts existing connections
		// so only set the configuration if it has changed.
		equal := conf.Equal(oldConf)
		if !equal {
			level.Info(m.logger).Log("msg", "WireGuard configurations are different")
			if err := wireguard.SetConf(link.Attrs().Name, confPath); err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("apply").Inc()
				return
			}
		}
		if err := iproute.Set(m.kiloIface, true); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	} else {
		m.leaderGuage.Set(0)
		level.Debug(m.logger).Log("msg", "local node is not the leader")
		if err := iproute.Set(m.kiloIface, false); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	// We need to add routes last since they may depend
	// on the WireGuard interface.
	routes, rules := t.Routes(link.Attrs().Name, m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
	if err := m.table.Set(routes, rules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
	}
}
  548. // RegisterMetrics registers Prometheus metrics on the given Prometheus
  549. // registerer.
  550. func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
  551. r.MustRegister(
  552. m.errorCounter,
  553. m.leaderGuage,
  554. m.nodesGuage,
  555. m.peersGuage,
  556. m.reconcileCounter,
  557. )
  558. }
// Stop stops the mesh.
func (m *Mesh) Stop() {
	// Closing the stop channel signals Run's loops, the controllers started
	// from Run, and the backend watchers to shut down.
	close(m.stop)
}
  563. func (m *Mesh) cleanUp() {
  564. if err := m.ipTables.CleanUp(); err != nil {
  565. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
  566. m.errorCounter.WithLabelValues("cleanUp").Inc()
  567. }
  568. if err := m.table.CleanUp(); err != nil {
  569. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
  570. m.errorCounter.WithLabelValues("cleanUp").Inc()
  571. }
  572. if err := os.Remove(confPath); err != nil {
  573. level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
  574. m.errorCounter.WithLabelValues("cleanUp").Inc()
  575. }
  576. if m.cleanUpIface {
  577. if err := iproute.RemoveInterface(m.kiloIface); err != nil {
  578. level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
  579. m.errorCounter.WithLabelValues("cleanUp").Inc()
  580. }
  581. }
  582. if err := m.Nodes().CleanUp(m.hostname); err != nil {
  583. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
  584. m.errorCounter.WithLabelValues("cleanUp").Inc()
  585. }
  586. if err := m.Peers().CleanUp(m.hostname); err != nil {
  587. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
  588. m.errorCounter.WithLabelValues("cleanUp").Inc()
  589. }
  590. if err := m.enc.CleanUp(); err != nil {
  591. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
  592. m.errorCounter.WithLabelValues("cleanUp").Inc()
  593. }
  594. }
  595. func (m *Mesh) resolveEndpoints() error {
  596. for k := range m.nodes {
  597. // Skip unready nodes, since they will not be used
  598. // in the topology anyways.
  599. if !m.nodes[k].Ready() {
  600. continue
  601. }
  602. // If the node is ready, then the endpoint is not nil
  603. // but it may not have a DNS name.
  604. if m.nodes[k].Endpoint.DNS == "" {
  605. continue
  606. }
  607. if err := resolveEndpoint(m.nodes[k].Endpoint); err != nil {
  608. return err
  609. }
  610. }
  611. for k := range m.peers {
  612. // Skip unready peers, since they will not be used
  613. // in the topology anyways.
  614. if !m.peers[k].Ready() {
  615. continue
  616. }
  617. // Peers may have nil endpoints.
  618. if m.peers[k].Endpoint == nil || m.peers[k].Endpoint.DNS == "" {
  619. continue
  620. }
  621. if err := resolveEndpoint(m.peers[k].Endpoint); err != nil {
  622. return err
  623. }
  624. }
  625. return nil
  626. }
  627. func resolveEndpoint(endpoint *wireguard.Endpoint) error {
  628. ips, err := net.LookupIP(endpoint.DNS)
  629. if err != nil {
  630. return fmt.Errorf("failed to look up DNS name %q: %v", endpoint.DNS, err)
  631. }
  632. nets := make([]*net.IPNet, len(ips), len(ips))
  633. for i := range ips {
  634. nets[i] = oneAddressCIDR(ips[i])
  635. }
  636. sortIPs(nets)
  637. if len(nets) == 0 {
  638. return fmt.Errorf("did not find any addresses for DNS name %q", endpoint.DNS)
  639. }
  640. endpoint.IP = nets[0].IP
  641. return nil
  642. }
  643. func isSelf(hostname string, node *Node) bool {
  644. return node != nil && node.Name == hostname
  645. }
  646. func nodesAreEqual(a, b *Node) bool {
  647. if (a != nil) != (b != nil) {
  648. return false
  649. }
  650. if a == b {
  651. return true
  652. }
  653. // Check the DNS name first since this package
  654. // is doing the DNS resolution.
  655. if !a.Endpoint.Equal(b.Endpoint, true) {
  656. return false
  657. }
  658. // Ignore LastSeen when comparing equality we want to check if the nodes are
  659. // equivalent. However, we do want to check if LastSeen has transitioned
  660. // between valid and invalid.
  661. return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive && discoveredEndpointsAreEqual(a.DiscoveredEndpoints, b.DiscoveredEndpoints) && ipNetSlicesEqual(a.AllowedLocationIPs, b.AllowedLocationIPs) && a.Granularity == b.Granularity
  662. }
  663. func peersAreEqual(a, b *Peer) bool {
  664. if !(a != nil) == (b != nil) {
  665. return false
  666. }
  667. if a == b {
  668. return true
  669. }
  670. // Check the DNS name first since this package
  671. // is doing the DNS resolution.
  672. if !a.Endpoint.Equal(b.Endpoint, true) {
  673. return false
  674. }
  675. if len(a.AllowedIPs) != len(b.AllowedIPs) {
  676. return false
  677. }
  678. for i := range a.AllowedIPs {
  679. if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
  680. return false
  681. }
  682. }
  683. return string(a.PublicKey) == string(b.PublicKey) && string(a.PresharedKey) == string(b.PresharedKey) && a.PersistentKeepalive == b.PersistentKeepalive
  684. }
  685. func ipNetsEqual(a, b *net.IPNet) bool {
  686. if a == nil && b == nil {
  687. return true
  688. }
  689. if (a != nil) != (b != nil) {
  690. return false
  691. }
  692. if a.Mask.String() != b.Mask.String() {
  693. return false
  694. }
  695. return a.IP.Equal(b.IP)
  696. }
  697. func ipNetSlicesEqual(a, b []*net.IPNet) bool {
  698. if len(a) != len(b) {
  699. return false
  700. }
  701. for i := range a {
  702. if !ipNetsEqual(a[i], b[i]) {
  703. return false
  704. }
  705. }
  706. return true
  707. }
  708. func subnetsEqual(a, b *net.IPNet) bool {
  709. if a == nil && b == nil {
  710. return true
  711. }
  712. if (a != nil) != (b != nil) {
  713. return false
  714. }
  715. if a.Mask.String() != b.Mask.String() {
  716. return false
  717. }
  718. if !a.Contains(b.IP) {
  719. return false
  720. }
  721. if !b.Contains(a.IP) {
  722. return false
  723. }
  724. return true
  725. }
  726. func discoveredEndpointsAreEqual(a, b map[string]*wireguard.Endpoint) bool {
  727. if a == nil && b == nil {
  728. return true
  729. }
  730. if (a != nil) != (b != nil) {
  731. return false
  732. }
  733. if len(a) != len(b) {
  734. return false
  735. }
  736. for k := range a {
  737. if !a[k].Equal(b[k], false) {
  738. return false
  739. }
  740. }
  741. return true
  742. }
  743. func linkByIndex(index int) (netlink.Link, error) {
  744. link, err := netlink.LinkByIndex(index)
  745. if err != nil {
  746. return nil, fmt.Errorf("failed to get interface: %v", err)
  747. }
  748. return link, nil
  749. }
  750. // discoverNATEndpoints uses the node's WireGuard configuration to returns a list of the most recently discovered endpoints for all nodes and peers behind NAT so that they can roam.
  751. func discoverNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wireguard.Conf, logger log.Logger) map[string]*wireguard.Endpoint {
  752. natEndpoints := make(map[string]*wireguard.Endpoint)
  753. keys := make(map[string]*wireguard.Peer)
  754. for i := range conf.Peers {
  755. keys[string(conf.Peers[i].PublicKey)] = conf.Peers[i]
  756. }
  757. for _, n := range nodes {
  758. if peer, ok := keys[string(n.Key)]; ok && n.PersistentKeepalive > 0 {
  759. level.Debug(logger).Log("msg", "WireGuard Update NAT Endpoint", "node", n.Name, "endpoint", peer.Endpoint, "former-endpoint", n.Endpoint, "same", n.Endpoint.Equal(peer.Endpoint, false), "latest-handshake", peer.LatestHandshake)
  760. if (peer.LatestHandshake != time.Time{}) {
  761. natEndpoints[string(n.Key)] = peer.Endpoint
  762. }
  763. }
  764. }
  765. for _, p := range peers {
  766. if peer, ok := keys[string(p.PublicKey)]; ok && p.PersistentKeepalive > 0 {
  767. if (peer.LatestHandshake != time.Time{}) {
  768. natEndpoints[string(p.PublicKey)] = peer.Endpoint
  769. }
  770. }
  771. }
  772. level.Debug(logger).Log("msg", "Discovered WireGuard NAT Endpoints", "DiscoveredEndpoints", natEndpoints)
  773. return natEndpoints
  774. }