mesh.go 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792
  1. // Copyright 2019 the Kilo authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package mesh
  16. import (
  17. "bytes"
  18. "fmt"
  19. "io/ioutil"
  20. "net"
  21. "os"
  22. "sync"
  23. "time"
  24. "github.com/go-kit/kit/log"
  25. "github.com/go-kit/kit/log/level"
  26. "github.com/prometheus/client_golang/prometheus"
  27. "github.com/vishvananda/netlink"
  28. "github.com/squat/kilo/pkg/encapsulation"
  29. "github.com/squat/kilo/pkg/iproute"
  30. "github.com/squat/kilo/pkg/iptables"
  31. "github.com/squat/kilo/pkg/route"
  32. "github.com/squat/kilo/pkg/wireguard"
  33. )
const (
	// kiloPath is the directory where Kilo stores its configuration.
	kiloPath = "/var/lib/kilo"
	// privateKeyPath is the filepath where the WireGuard private key is stored.
	privateKeyPath = kiloPath + "/key"
	// confPath is the filepath where the generated WireGuard configuration is stored.
	confPath = kiloPath + "/conf"
)
// Mesh is able to create Kilo network meshes.
type Mesh struct {
	Backend
	// cleanUpIface indicates whether the WireGuard interface
	// should be removed during cleanUp.
	cleanUpIface bool
	// cni indicates whether Kilo should manage the CNI configuration.
	cni     bool
	cniPath string
	// enc encapsulates traffic between nodes within a location.
	enc encapsulation.Encapsulator
	// externalIP is the publicly reachable IP of this node.
	externalIP *net.IPNet
	// granularity controls how the mesh is partitioned into segments.
	granularity Granularity
	hostname    string
	// internalIP is the private IP of this node, if any.
	internalIP *net.IPNet
	ipTables   *iptables.Controller
	// kiloIface is the index of the WireGuard interface managed by Kilo.
	kiloIface int
	key       []byte
	// local indicates whether Kilo should also manage routes for
	// the local pod subnet.
	local bool
	port  uint32
	// priv and pub are the node's WireGuard private and public keys.
	priv      []byte
	privIface int
	pub       []byte
	// resyncPeriod is how often the full topology is re-applied.
	resyncPeriod time.Duration
	// stop is closed by Stop to shut down all goroutines started by Run.
	stop   chan struct{}
	subnet *net.IPNet
	table  *route.Table
	// wireGuardIP is the IP assigned to the WireGuard interface
	// when this node is a leader; nil otherwise.
	wireGuardIP *net.IPNet
	// nodes and peers are mutable fields in the struct
	// and need to be guarded.
	nodes map[string]*Node
	peers map[string]*Peer
	mu    sync.Mutex
	// Prometheus metrics.
	// NOTE(review): "Guage" is a misspelling of "Gauge"; the field names
	// are kept as-is because they are referenced throughout the package.
	errorCounter     *prometheus.CounterVec
	leaderGuage      prometheus.Gauge
	nodesGuage       prometheus.Gauge
	peersGuage       prometheus.Gauge
	reconcileCounter prometheus.Counter
	logger           log.Logger
}
  78. // New returns a new Mesh instance.
  79. func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, createIface bool, resyncPeriod time.Duration, logger log.Logger) (*Mesh, error) {
  80. if err := os.MkdirAll(kiloPath, 0700); err != nil {
  81. return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
  82. }
  83. private, err := ioutil.ReadFile(privateKeyPath)
  84. private = bytes.Trim(private, "\n")
  85. if err != nil {
  86. level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
  87. if private, err = wireguard.GenKey(); err != nil {
  88. return nil, err
  89. }
  90. }
  91. public, err := wireguard.PubKey(private)
  92. if err != nil {
  93. return nil, err
  94. }
  95. if err := ioutil.WriteFile(privateKeyPath, private, 0600); err != nil {
  96. return nil, fmt.Errorf("failed to write private key to disk: %v", err)
  97. }
  98. cniIndex, err := cniDeviceIndex()
  99. if err != nil {
  100. return nil, fmt.Errorf("failed to query netlink for CNI device: %v", err)
  101. }
  102. var kiloIface int
  103. if createIface {
  104. kiloIface, _, err = wireguard.New(iface)
  105. if err != nil {
  106. return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
  107. }
  108. } else {
  109. link, err := netlink.LinkByName(iface)
  110. if err != nil {
  111. return nil, fmt.Errorf("failed to get interface index: %v", err)
  112. }
  113. kiloIface = link.Attrs().Index
  114. }
  115. privateIP, publicIP, err := getIP(hostname, kiloIface, enc.Index(), cniIndex)
  116. if err != nil {
  117. return nil, fmt.Errorf("failed to find public IP: %v", err)
  118. }
  119. var privIface int
  120. if privateIP != nil {
  121. ifaces, err := interfacesForIP(privateIP)
  122. if err != nil {
  123. return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
  124. }
  125. privIface = ifaces[0].Index
  126. if enc.Strategy() != encapsulation.Never {
  127. if err := enc.Init(privIface); err != nil {
  128. return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
  129. }
  130. }
  131. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
  132. } else {
  133. enc = encapsulation.Noop(enc.Strategy())
  134. level.Debug(logger).Log("msg", "running without a private IP address")
  135. }
  136. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
  137. ipTables, err := iptables.New(iptables.WithLogger(log.With(logger, "component", "iptables")), iptables.WithResyncPeriod(resyncPeriod))
  138. if err != nil {
  139. return nil, fmt.Errorf("failed to IP tables controller: %v", err)
  140. }
  141. return &Mesh{
  142. Backend: backend,
  143. cleanUpIface: cleanUpIface,
  144. cni: cni,
  145. cniPath: cniPath,
  146. enc: enc,
  147. externalIP: publicIP,
  148. granularity: granularity,
  149. hostname: hostname,
  150. internalIP: privateIP,
  151. ipTables: ipTables,
  152. kiloIface: kiloIface,
  153. nodes: make(map[string]*Node),
  154. peers: make(map[string]*Peer),
  155. port: port,
  156. priv: private,
  157. privIface: privIface,
  158. pub: public,
  159. resyncPeriod: resyncPeriod,
  160. local: local,
  161. stop: make(chan struct{}),
  162. subnet: subnet,
  163. table: route.NewTable(),
  164. errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
  165. Name: "kilo_errors_total",
  166. Help: "Number of errors that occurred while administering the mesh.",
  167. }, []string{"event"}),
  168. leaderGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  169. Name: "kilo_leader",
  170. Help: "Leadership status of the node.",
  171. }),
  172. nodesGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  173. Name: "kilo_nodes",
  174. Help: "Number of nodes in the mesh.",
  175. }),
  176. peersGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  177. Name: "kilo_peers",
  178. Help: "Number of peers in the mesh.",
  179. }),
  180. reconcileCounter: prometheus.NewCounter(prometheus.CounterOpts{
  181. Name: "kilo_reconciles_total",
  182. Help: "Number of reconciliation attempts.",
  183. }),
  184. logger: logger,
  185. }, nil
  186. }
// Run starts the mesh.
// It initializes the node and peer backends, starts the IP tables and
// route table controllers, and then loops, reconciling the mesh on node,
// peer, check-in, and resync events until Stop is called.
func (m *Mesh) Run() error {
	if err := m.Nodes().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize node backend: %v", err)
	}
	// Try to set the CNI config quickly.
	if m.cni {
		if n, err := m.Nodes().Get(m.hostname); err == nil {
			m.nodes[m.hostname] = n
			m.updateCNIConfig()
		} else {
			level.Warn(m.logger).Log("error", fmt.Errorf("failed to get node %q: %v", m.hostname, err))
		}
	}
	if err := m.Peers().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize peer backend: %v", err)
	}
	ipTablesErrors, err := m.ipTables.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for IP tables updates: %v", err)
	}
	routeErrors, err := m.table.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for route table updates: %v", err)
	}
	// Forward asynchronous errors from the controllers to the log and
	// the error counter until the mesh is stopped.
	go func() {
		for {
			var err error
			select {
			case err = <-ipTablesErrors:
			case err = <-routeErrors:
			case <-m.stop:
				return
			}
			if err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("run").Inc()
			}
		}
	}()
	defer m.cleanUp()
	// Reuse two timers, resetting them after each firing,
	// rather than allocating a new timer per iteration.
	resync := time.NewTimer(m.resyncPeriod)
	checkIn := time.NewTimer(checkInPeriod)
	nw := m.Nodes().Watch()
	pw := m.Peers().Watch()
	var ne *NodeEvent
	var pe *PeerEvent
	for {
		select {
		case ne = <-nw:
			m.syncNodes(ne)
		case pe = <-pw:
			m.syncPeers(pe)
		case <-checkIn.C:
			m.checkIn()
			checkIn.Reset(checkInPeriod)
		case <-resync.C:
			if m.cni {
				m.updateCNIConfig()
			}
			m.applyTopology()
			resync.Reset(m.resyncPeriod)
		case <-m.stop:
			return nil
		}
	}
}
  254. func (m *Mesh) syncNodes(e *NodeEvent) {
  255. logger := log.With(m.logger, "event", e.Type)
  256. level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
  257. if isSelf(m.hostname, e.Node) {
  258. level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
  259. m.handleLocal(e.Node)
  260. return
  261. }
  262. var diff bool
  263. m.mu.Lock()
  264. if !e.Node.Ready() {
  265. level.Debug(logger).Log("msg", "received incomplete node", "node", e.Node)
  266. // An existing node is no longer valid
  267. // so remove it from the mesh.
  268. if _, ok := m.nodes[e.Node.Name]; ok {
  269. level.Info(logger).Log("msg", "node is no longer ready", "node", e.Node)
  270. diff = true
  271. }
  272. } else {
  273. switch e.Type {
  274. case AddEvent:
  275. fallthrough
  276. case UpdateEvent:
  277. if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) {
  278. diff = true
  279. }
  280. // Even if the nodes are the same,
  281. // overwrite the old node to update the timestamp.
  282. m.nodes[e.Node.Name] = e.Node
  283. case DeleteEvent:
  284. delete(m.nodes, e.Node.Name)
  285. diff = true
  286. }
  287. }
  288. m.mu.Unlock()
  289. if diff {
  290. level.Info(logger).Log("node", e.Node)
  291. m.applyTopology()
  292. }
  293. }
  294. func (m *Mesh) syncPeers(e *PeerEvent) {
  295. logger := log.With(m.logger, "event", e.Type)
  296. level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
  297. var diff bool
  298. m.mu.Lock()
  299. // Peers are indexed by public key.
  300. key := string(e.Peer.PublicKey)
  301. if !e.Peer.Ready() {
  302. level.Debug(logger).Log("msg", "received incomplete peer", "peer", e.Peer)
  303. // An existing peer is no longer valid
  304. // so remove it from the mesh.
  305. if _, ok := m.peers[key]; ok {
  306. level.Info(logger).Log("msg", "peer is no longer ready", "peer", e.Peer)
  307. diff = true
  308. }
  309. } else {
  310. switch e.Type {
  311. case AddEvent:
  312. fallthrough
  313. case UpdateEvent:
  314. if e.Old != nil && key != string(e.Old.PublicKey) {
  315. delete(m.peers, string(e.Old.PublicKey))
  316. diff = true
  317. }
  318. if !peersAreEqual(m.peers[key], e.Peer) {
  319. m.peers[key] = e.Peer
  320. diff = true
  321. }
  322. case DeleteEvent:
  323. delete(m.peers, key)
  324. diff = true
  325. }
  326. }
  327. m.mu.Unlock()
  328. if diff {
  329. level.Info(logger).Log("peer", e.Peer)
  330. m.applyTopology()
  331. }
  332. }
  333. // checkIn will try to update the local node's LastSeen timestamp
  334. // in the backend.
  335. func (m *Mesh) checkIn() {
  336. m.mu.Lock()
  337. defer m.mu.Unlock()
  338. n := m.nodes[m.hostname]
  339. if n == nil {
  340. level.Debug(m.logger).Log("msg", "no local node found in backend")
  341. return
  342. }
  343. oldTime := n.LastSeen
  344. n.LastSeen = time.Now().Unix()
  345. if err := m.Nodes().Set(m.hostname, n); err != nil {
  346. level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
  347. m.errorCounter.WithLabelValues("checkin").Inc()
  348. // Revert time.
  349. n.LastSeen = oldTime
  350. return
  351. }
  352. level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
  353. }
// handleLocal reconciles the local node against the backend:
// it fills in defaults for the endpoint and internal IP, pushes the
// calculated local node to the backend when it differs, and applies
// the topology when the in-memory view changed.
func (m *Mesh) handleLocal(n *Node) {
	// Allow the IPs to be overridden.
	if n.Endpoint == nil || (n.Endpoint.DNS == "" && n.Endpoint.IP == nil) {
		n.Endpoint = &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: m.externalIP.IP}, Port: m.port}
	}
	if n.InternalIP == nil && !n.NoInternalIP {
		n.InternalIP = m.internalIP
	}
	// Compare the given node to the calculated local node.
	// Take leader, location, and subnet from the argument, as these
	// are not determined by kilo.
	local := &Node{
		Endpoint:            n.Endpoint,
		Key:                 m.pub,
		NoInternalIP:        n.NoInternalIP,
		InternalIP:          n.InternalIP,
		LastSeen:            time.Now().Unix(),
		Leader:              n.Leader,
		Location:            n.Location,
		Name:                m.hostname,
		PersistentKeepalive: n.PersistentKeepalive,
		Subnet:              n.Subnet,
		WireGuardIP:         m.wireGuardIP,
	}
	if !nodesAreEqual(n, local) {
		level.Debug(m.logger).Log("msg", "local node differs from backend")
		if err := m.Nodes().Set(m.hostname, local); err != nil {
			level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
			m.errorCounter.WithLabelValues("local").Inc()
			return
		}
		level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
	}
	// Re-read the cached copy under the lock; m.nodes is guarded by m.mu.
	m.mu.Lock()
	n = m.nodes[m.hostname]
	if n == nil {
		n = &Node{}
	}
	m.mu.Unlock()
	// Only update the cache and re-apply the topology when the cached
	// node differs from the freshly calculated one.
	if !nodesAreEqual(n, local) {
		m.mu.Lock()
		m.nodes[local.Name] = local
		m.mu.Unlock()
		m.applyTopology()
	}
}
// applyTopology recalculates the mesh topology from the current nodes
// and peers and applies it to the system: it writes the WireGuard
// configuration, sets IP tables rules, assigns the interface address on
// leaders, and installs routes. Errors are logged and counted rather
// than returned; the next resync retries.
func (m *Mesh) applyTopology() {
	m.reconcileCounter.Inc()
	m.mu.Lock()
	defer m.mu.Unlock()
	// If we can't resolve an endpoint, then fail and retry later.
	if err := m.resolveEndpoints(); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Ensure only ready nodes are considered.
	nodes := make(map[string]*Node)
	var readyNodes float64
	for k := range m.nodes {
		if !m.nodes[k].Ready() {
			continue
		}
		// Make a shallow copy of the node.
		node := *m.nodes[k]
		nodes[k] = &node
		readyNodes++
	}
	// Ensure only ready peers are considered.
	peers := make(map[string]*Peer)
	var readyPeers float64
	for k := range m.peers {
		if !m.peers[k].Ready() {
			continue
		}
		// Make a shallow copy of the peer.
		peer := *m.peers[k]
		peers[k] = &peer
		readyPeers++
	}
	m.nodesGuage.Set(readyNodes)
	m.peersGuage.Set(readyPeers)
	// We cannot do anything with the topology until the local node is available.
	if nodes[m.hostname] == nil {
		return
	}
	// Find the Kilo interface name.
	link, err := linkByIndex(m.kiloIface)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Find the old configuration.
	oldConfRaw, err := wireguard.ShowConf(link.Attrs().Name)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	oldConf := wireguard.Parse(oldConfRaw)
	// Pick up roamed endpoints for NATed nodes and peers from the
	// running WireGuard configuration before building the topology.
	updateNATEndpoints(nodes, peers, oldConf)
	t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet, nodes[m.hostname].PersistentKeepalive)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Update the node's WireGuard IP.
	if t.leader {
		m.wireGuardIP = t.wireGuardCIDR
	} else {
		m.wireGuardIP = nil
	}
	conf := t.Conf()
	buf, err := conf.Bytes()
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	if err := ioutil.WriteFile(confPath, buf, 0600); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	ipRules := t.Rules(m.cni)
	// If we are handling local routes, ensure the local
	// tunnel has an IP address and IPIP traffic is allowed.
	if m.enc.Strategy() != encapsulation.Never && m.local {
		var cidrs []*net.IPNet
		for _, s := range t.segments {
			// If the location prefix is not logicalLocation, but nodeLocation,
			// we don't need to set any extra rules for encapsulation anyways
			// because traffic will go over WireGuard.
			if s.location == logicalLocationPrefix+nodes[m.hostname].Location {
				for i := range s.privateIPs {
					cidrs = append(cidrs, oneAddressCIDR(s.privateIPs[i]))
				}
				break
			}
		}
		ipRules = append(ipRules, m.enc.Rules(cidrs)...)
		// If we are handling local routes, ensure the local
		// tunnel has an IP address.
		if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	if err := m.ipTables.Set(ipRules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	if t.leader {
		m.leaderGuage.Set(1)
		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
		// Setting the WireGuard configuration interrupts existing connections
		// so only set the configuration if it has changed.
		equal := conf.Equal(oldConf)
		if !equal {
			level.Info(m.logger).Log("msg", "WireGuard configurations are different")
			if err := wireguard.SetConf(link.Attrs().Name, confPath); err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("apply").Inc()
				return
			}
		}
		if err := iproute.Set(m.kiloIface, true); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	} else {
		m.leaderGuage.Set(0)
		level.Debug(m.logger).Log("msg", "local node is not the leader")
		if err := iproute.Set(m.kiloIface, false); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	// We need to add routes last since they may depend
	// on the WireGuard interface.
	routes, rules := t.Routes(link.Attrs().Name, m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
	if err := m.table.Set(routes, rules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
	}
}
  550. // RegisterMetrics registers Prometheus metrics on the given Prometheus
  551. // registerer.
  552. func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
  553. r.MustRegister(
  554. m.errorCounter,
  555. m.leaderGuage,
  556. m.nodesGuage,
  557. m.peersGuage,
  558. m.reconcileCounter,
  559. )
  560. }
// Stop stops the mesh.
// Closing the stop channel signals every watcher and goroutine
// started by Run to exit.
func (m *Mesh) Stop() {
	close(m.stop)
}
  565. func (m *Mesh) cleanUp() {
  566. if err := m.ipTables.CleanUp(); err != nil {
  567. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
  568. m.errorCounter.WithLabelValues("cleanUp").Inc()
  569. }
  570. if err := m.table.CleanUp(); err != nil {
  571. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
  572. m.errorCounter.WithLabelValues("cleanUp").Inc()
  573. }
  574. if err := os.Remove(confPath); err != nil {
  575. level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
  576. m.errorCounter.WithLabelValues("cleanUp").Inc()
  577. }
  578. if m.cleanUpIface {
  579. if err := iproute.RemoveInterface(m.kiloIface); err != nil {
  580. level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
  581. m.errorCounter.WithLabelValues("cleanUp").Inc()
  582. }
  583. }
  584. if err := m.Nodes().CleanUp(m.hostname); err != nil {
  585. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
  586. m.errorCounter.WithLabelValues("cleanUp").Inc()
  587. }
  588. if err := m.Peers().CleanUp(m.hostname); err != nil {
  589. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
  590. m.errorCounter.WithLabelValues("cleanUp").Inc()
  591. }
  592. if err := m.enc.CleanUp(); err != nil {
  593. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
  594. m.errorCounter.WithLabelValues("cleanUp").Inc()
  595. }
  596. }
  597. func (m *Mesh) resolveEndpoints() error {
  598. for k := range m.nodes {
  599. // Skip unready nodes, since they will not be used
  600. // in the topology anyways.
  601. if !m.nodes[k].Ready() {
  602. continue
  603. }
  604. // If the node is ready, then the endpoint is not nil
  605. // but it may not have a DNS name.
  606. if m.nodes[k].Endpoint.DNS == "" {
  607. continue
  608. }
  609. if err := resolveEndpoint(m.nodes[k].Endpoint); err != nil {
  610. return err
  611. }
  612. }
  613. for k := range m.peers {
  614. // Skip unready peers, since they will not be used
  615. // in the topology anyways.
  616. if !m.peers[k].Ready() {
  617. continue
  618. }
  619. // Peers may have nil endpoints.
  620. if m.peers[k].Endpoint == nil || m.peers[k].Endpoint.DNS == "" {
  621. continue
  622. }
  623. if err := resolveEndpoint(m.peers[k].Endpoint); err != nil {
  624. return err
  625. }
  626. }
  627. return nil
  628. }
  629. func resolveEndpoint(endpoint *wireguard.Endpoint) error {
  630. ips, err := net.LookupIP(endpoint.DNS)
  631. if err != nil {
  632. return fmt.Errorf("failed to look up DNS name %q: %v", endpoint.DNS, err)
  633. }
  634. nets := make([]*net.IPNet, len(ips), len(ips))
  635. for i := range ips {
  636. nets[i] = oneAddressCIDR(ips[i])
  637. }
  638. sortIPs(nets)
  639. if len(nets) == 0 {
  640. return fmt.Errorf("did not find any addresses for DNS name %q", endpoint.DNS)
  641. }
  642. endpoint.IP = nets[0].IP
  643. return nil
  644. }
  645. func isSelf(hostname string, node *Node) bool {
  646. return node != nil && node.Name == hostname
  647. }
  648. func nodesAreEqual(a, b *Node) bool {
  649. if (a != nil) != (b != nil) {
  650. return false
  651. }
  652. if a == b {
  653. return true
  654. }
  655. if !(a.Endpoint != nil) == (b.Endpoint != nil) {
  656. return false
  657. }
  658. if a.Endpoint != nil {
  659. if a.Endpoint.Port != b.Endpoint.Port {
  660. return false
  661. }
  662. // Check the DNS name first since this package
  663. // is doing the DNS resolution.
  664. if a.Endpoint.DNS != b.Endpoint.DNS {
  665. return false
  666. }
  667. if a.Endpoint.DNS == "" && !a.Endpoint.IP.Equal(b.Endpoint.IP) {
  668. return false
  669. }
  670. }
  671. // Ignore LastSeen when comparing equality we want to check if the nodes are
  672. // equivalent. However, we do want to check if LastSeen has transitioned
  673. // between valid and invalid.
  674. return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready() && a.PersistentKeepalive == b.PersistentKeepalive
  675. }
  676. func peersAreEqual(a, b *Peer) bool {
  677. if !(a != nil) == (b != nil) {
  678. return false
  679. }
  680. if a == b {
  681. return true
  682. }
  683. if !(a.Endpoint != nil) == (b.Endpoint != nil) {
  684. return false
  685. }
  686. if a.Endpoint != nil {
  687. if a.Endpoint.Port != b.Endpoint.Port {
  688. return false
  689. }
  690. // Check the DNS name first since this package
  691. // is doing the DNS resolution.
  692. if a.Endpoint.DNS != b.Endpoint.DNS {
  693. return false
  694. }
  695. if a.Endpoint.DNS == "" && !a.Endpoint.IP.Equal(b.Endpoint.IP) {
  696. return false
  697. }
  698. }
  699. if len(a.AllowedIPs) != len(b.AllowedIPs) {
  700. return false
  701. }
  702. for i := range a.AllowedIPs {
  703. if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
  704. return false
  705. }
  706. }
  707. return string(a.PublicKey) == string(b.PublicKey) && string(a.PresharedKey) == string(b.PresharedKey) && a.PersistentKeepalive == b.PersistentKeepalive
  708. }
  709. func ipNetsEqual(a, b *net.IPNet) bool {
  710. if a == nil && b == nil {
  711. return true
  712. }
  713. if (a != nil) != (b != nil) {
  714. return false
  715. }
  716. if a.Mask.String() != b.Mask.String() {
  717. return false
  718. }
  719. return a.IP.Equal(b.IP)
  720. }
  721. func subnetsEqual(a, b *net.IPNet) bool {
  722. if a == nil && b == nil {
  723. return true
  724. }
  725. if (a != nil) != (b != nil) {
  726. return false
  727. }
  728. if a.Mask.String() != b.Mask.String() {
  729. return false
  730. }
  731. if !a.Contains(b.IP) {
  732. return false
  733. }
  734. if !b.Contains(a.IP) {
  735. return false
  736. }
  737. return true
  738. }
  739. func linkByIndex(index int) (netlink.Link, error) {
  740. link, err := netlink.LinkByIndex(index)
  741. if err != nil {
  742. return nil, fmt.Errorf("failed to get interface: %v", err)
  743. }
  744. return link, nil
  745. }
  746. // updateNATEndpoints ensures that nodes and peers behind NAT update
  747. // their endpoints from the WireGuard configuration so they can roam.
  748. func updateNATEndpoints(nodes map[string]*Node, peers map[string]*Peer, conf *wireguard.Conf) {
  749. keys := make(map[string]*wireguard.Peer)
  750. for i := range conf.Peers {
  751. keys[string(conf.Peers[i].PublicKey)] = conf.Peers[i]
  752. }
  753. for _, n := range nodes {
  754. if peer, ok := keys[string(n.Key)]; ok && n.PersistentKeepalive > 0 {
  755. n.Endpoint = peer.Endpoint
  756. }
  757. }
  758. for _, p := range peers {
  759. if peer, ok := keys[string(p.PublicKey)]; ok && p.PersistentKeepalive > 0 {
  760. p.Endpoint = peer.Endpoint
  761. }
  762. }
  763. }