// Copyright 2019 the Kilo authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. package mesh
  15. import (
  16. "bytes"
  17. "fmt"
  18. "io/ioutil"
  19. "net"
  20. "os"
  21. "sync"
  22. "time"
  23. "github.com/go-kit/kit/log"
  24. "github.com/go-kit/kit/log/level"
  25. "github.com/prometheus/client_golang/prometheus"
  26. "github.com/vishvananda/netlink"
  27. "github.com/squat/kilo/pkg/encapsulation"
  28. "github.com/squat/kilo/pkg/iproute"
  29. "github.com/squat/kilo/pkg/iptables"
  30. "github.com/squat/kilo/pkg/route"
  31. "github.com/squat/kilo/pkg/wireguard"
  32. )
  33. const resyncPeriod = 30 * time.Second
  34. const (
  35. // KiloPath is the directory where Kilo stores its configuration.
  36. KiloPath = "/var/lib/kilo"
  37. // PrivateKeyPath is the filepath where the WireGuard private key is stored.
  38. PrivateKeyPath = KiloPath + "/key"
  39. // ConfPath is the filepath where the WireGuard configuration is stored.
  40. ConfPath = KiloPath + "/conf"
  41. // DefaultKiloPort is the default UDP port Kilo uses.
  42. DefaultKiloPort = 51820
  43. // DefaultCNIPath is the default path to the CNI config file.
  44. DefaultCNIPath = "/etc/cni/net.d/10-kilo.conflist"
  45. )
  46. // DefaultKiloSubnet is the default CIDR for Kilo.
  47. var DefaultKiloSubnet = &net.IPNet{IP: []byte{10, 4, 0, 0}, Mask: []byte{255, 255, 0, 0}}
  48. // Granularity represents the abstraction level at which the network
  49. // should be meshed.
  50. type Granularity string
  51. const (
  52. // LogicalGranularity indicates that the network should create
  53. // a mesh between logical locations, e.g. data-centers, but not between
  54. // all nodes within a single location.
  55. LogicalGranularity Granularity = "location"
  56. // FullGranularity indicates that the network should create
  57. // a mesh between every node.
  58. FullGranularity Granularity = "full"
  59. )
  60. // Node represents a node in the network.
  61. type Node struct {
  62. ExternalIP *net.IPNet
  63. Key []byte
  64. InternalIP *net.IPNet
  65. // LastSeen is a Unix time for the last time
  66. // the node confirmed it was live.
  67. LastSeen int64
  68. // Leader is a suggestion to Kilo that
  69. // the node wants to lead its segment.
  70. Leader bool
  71. Location string
  72. Name string
  73. Subnet *net.IPNet
  74. WireGuardIP *net.IPNet
  75. }
  76. // Ready indicates whether or not the node is ready.
  77. func (n *Node) Ready() bool {
  78. // Nodes that are not leaders will not have WireGuardIPs, so it is not required.
  79. return n != nil && n.ExternalIP != nil && n.Key != nil && n.InternalIP != nil && n.Subnet != nil && time.Now().Unix()-n.LastSeen < int64(resyncPeriod)*2/int64(time.Second)
  80. }
  81. // Peer represents a peer in the network.
  82. type Peer struct {
  83. wireguard.Peer
  84. Name string
  85. }
  86. // Ready indicates whether or not the peer is ready.
  87. func (p *Peer) Ready() bool {
  88. return p != nil && p.AllowedIPs != nil && len(p.AllowedIPs) != 0 && p.PublicKey != nil
  89. }
  90. // EventType describes what kind of an action an event represents.
  91. type EventType string
  92. const (
  93. // AddEvent represents an action where an item was added.
  94. AddEvent EventType = "add"
  95. // DeleteEvent represents an action where an item was removed.
  96. DeleteEvent EventType = "delete"
  97. // UpdateEvent represents an action where an item was updated.
  98. UpdateEvent EventType = "update"
  99. )
  100. // NodeEvent represents an event concerning a node in the cluster.
  101. type NodeEvent struct {
  102. Type EventType
  103. Node *Node
  104. Old *Node
  105. }
  106. // PeerEvent represents an event concerning a peer in the cluster.
  107. type PeerEvent struct {
  108. Type EventType
  109. Peer *Peer
  110. Old *Peer
  111. }
  112. // Backend can create clients for all of the
  113. // primitive types that Kilo deals with, namely:
  114. // * nodes; and
  115. // * peers.
  116. type Backend interface {
  117. Nodes() NodeBackend
  118. Peers() PeerBackend
  119. }
  120. // NodeBackend can get nodes by name, init itself,
  121. // list the nodes that should be meshed,
  122. // set Kilo properties for a node,
  123. // clean up any changes applied to the backend,
  124. // and watch for changes to nodes.
  125. type NodeBackend interface {
  126. CleanUp(string) error
  127. Get(string) (*Node, error)
  128. Init(<-chan struct{}) error
  129. List() ([]*Node, error)
  130. Set(string, *Node) error
  131. Watch() <-chan *NodeEvent
  132. }
  133. // PeerBackend can get peers by name, init itself,
  134. // list the peers that should be in the mesh,
  135. // set fields for a peer,
  136. // clean up any changes applied to the backend,
  137. // and watch for changes to peers.
  138. type PeerBackend interface {
  139. CleanUp(string) error
  140. Get(string) (*Peer, error)
  141. Init(<-chan struct{}) error
  142. List() ([]*Peer, error)
  143. Set(string, *Peer) error
  144. Watch() <-chan *PeerEvent
  145. }
  146. // Mesh is able to create Kilo network meshes.
  147. type Mesh struct {
  148. Backend
  149. cni bool
  150. cniPath string
  151. enc encapsulation.Interface
  152. externalIP *net.IPNet
  153. granularity Granularity
  154. hostname string
  155. internalIP *net.IPNet
  156. ipTables *iptables.Controller
  157. kiloIface int
  158. key []byte
  159. local bool
  160. port uint32
  161. priv []byte
  162. privIface int
  163. pub []byte
  164. pubIface int
  165. stop chan struct{}
  166. subnet *net.IPNet
  167. table *route.Table
  168. wireGuardIP *net.IPNet
  169. // nodes and peers are mutable fields in the struct
  170. // and needs to be guarded.
  171. nodes map[string]*Node
  172. peers map[string]*Peer
  173. mu sync.Mutex
  174. errorCounter *prometheus.CounterVec
  175. nodesGuage prometheus.Gauge
  176. peersGuage prometheus.Gauge
  177. reconcileCounter prometheus.Counter
  178. logger log.Logger
  179. }
  180. // New returns a new Mesh instance.
  181. func New(backend Backend, enc encapsulation.Interface, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath string, logger log.Logger) (*Mesh, error) {
  182. if err := os.MkdirAll(KiloPath, 0700); err != nil {
  183. return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
  184. }
  185. private, err := ioutil.ReadFile(PrivateKeyPath)
  186. private = bytes.Trim(private, "\n")
  187. if err != nil {
  188. level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
  189. if private, err = wireguard.GenKey(); err != nil {
  190. return nil, err
  191. }
  192. }
  193. public, err := wireguard.PubKey(private)
  194. if err != nil {
  195. return nil, err
  196. }
  197. if err := ioutil.WriteFile(PrivateKeyPath, private, 0600); err != nil {
  198. return nil, fmt.Errorf("failed to write private key to disk: %v", err)
  199. }
  200. cniIndex, err := cniDeviceIndex()
  201. if err != nil {
  202. return nil, fmt.Errorf("failed to query netlink for CNI device: %v", err)
  203. }
  204. privateIP, publicIP, err := getIP(hostname, enc.Index(), cniIndex)
  205. if err != nil {
  206. return nil, fmt.Errorf("failed to find public IP: %v", err)
  207. }
  208. ifaces, err := interfacesForIP(privateIP)
  209. if err != nil {
  210. return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
  211. }
  212. privIface := ifaces[0].Index
  213. ifaces, err = interfacesForIP(publicIP)
  214. if err != nil {
  215. return nil, fmt.Errorf("failed to find interface for public IP: %v", err)
  216. }
  217. pubIface := ifaces[0].Index
  218. kiloIface, err := wireguard.New("kilo")
  219. if err != nil {
  220. return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
  221. }
  222. if enc.Strategy() != encapsulation.Never {
  223. if err := enc.Init(privIface); err != nil {
  224. return nil, fmt.Errorf("failed to initialize encapsulation: %v", err)
  225. }
  226. }
  227. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
  228. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
  229. ipTables, err := iptables.New(len(subnet.IP))
  230. if err != nil {
  231. return nil, fmt.Errorf("failed to IP tables controller: %v", err)
  232. }
  233. return &Mesh{
  234. Backend: backend,
  235. cni: cni,
  236. cniPath: cniPath,
  237. enc: enc,
  238. externalIP: publicIP,
  239. granularity: granularity,
  240. hostname: hostname,
  241. internalIP: privateIP,
  242. ipTables: ipTables,
  243. kiloIface: kiloIface,
  244. nodes: make(map[string]*Node),
  245. peers: make(map[string]*Peer),
  246. port: port,
  247. priv: private,
  248. privIface: privIface,
  249. pub: public,
  250. pubIface: pubIface,
  251. local: local,
  252. stop: make(chan struct{}),
  253. subnet: subnet,
  254. table: route.NewTable(),
  255. errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
  256. Name: "kilo_errors_total",
  257. Help: "Number of errors that occurred while administering the mesh.",
  258. }, []string{"event"}),
  259. nodesGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  260. Name: "kilo_nodes",
  261. Help: "Number of nodes in the mesh.",
  262. }),
  263. peersGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  264. Name: "kilo_peers",
  265. Help: "Number of peers in the mesh.",
  266. }),
  267. reconcileCounter: prometheus.NewCounter(prometheus.CounterOpts{
  268. Name: "kilo_reconciles_total",
  269. Help: "Number of reconciliation attempts.",
  270. }),
  271. logger: logger,
  272. }, nil
  273. }
  274. // Run starts the mesh.
  275. func (m *Mesh) Run() error {
  276. if err := m.Nodes().Init(m.stop); err != nil {
  277. return fmt.Errorf("failed to initialize node backend: %v", err)
  278. }
  279. // Try to set the CNI config quickly.
  280. if n, err := m.Nodes().Get(m.hostname); err == nil {
  281. if n != nil && n.Subnet != nil {
  282. m.nodes[m.hostname] = n
  283. m.updateCNIConfig()
  284. }
  285. }
  286. if err := m.Peers().Init(m.stop); err != nil {
  287. return fmt.Errorf("failed to initialize peer backend: %v", err)
  288. }
  289. ipTablesErrors, err := m.ipTables.Run(m.stop)
  290. if err != nil {
  291. return fmt.Errorf("failed to watch for IP tables updates: %v", err)
  292. }
  293. routeErrors, err := m.table.Run(m.stop)
  294. if err != nil {
  295. return fmt.Errorf("failed to watch for route table updates: %v", err)
  296. }
  297. go func() {
  298. for {
  299. var err error
  300. select {
  301. case err = <-ipTablesErrors:
  302. case err = <-routeErrors:
  303. case <-m.stop:
  304. return
  305. }
  306. if err != nil {
  307. level.Error(m.logger).Log("error", err)
  308. m.errorCounter.WithLabelValues("run").Inc()
  309. }
  310. }
  311. }()
  312. defer m.cleanUp()
  313. t := time.NewTimer(resyncPeriod)
  314. nw := m.Nodes().Watch()
  315. pw := m.Peers().Watch()
  316. var ne *NodeEvent
  317. var pe *PeerEvent
  318. for {
  319. select {
  320. case ne = <-nw:
  321. m.syncNodes(ne)
  322. case pe = <-pw:
  323. m.syncPeers(pe)
  324. case <-t.C:
  325. m.checkIn()
  326. if m.cni {
  327. m.updateCNIConfig()
  328. }
  329. m.syncEndpoints()
  330. m.applyTopology()
  331. t.Reset(resyncPeriod)
  332. case <-m.stop:
  333. return nil
  334. }
  335. }
  336. }
  337. // WireGuard updates the endpoints of peers to match the
  338. // last place a valid packet was received from.
  339. // Periodically we need to syncronize the endpoints
  340. // of peers in the backend to match the WireGuard configuration.
  341. func (m *Mesh) syncEndpoints() {
  342. link, err := linkByIndex(m.kiloIface)
  343. if err != nil {
  344. level.Error(m.logger).Log("error", err)
  345. m.errorCounter.WithLabelValues("endpoints").Inc()
  346. return
  347. }
  348. conf, err := wireguard.ShowConf(link.Attrs().Name)
  349. if err != nil {
  350. level.Error(m.logger).Log("error", err)
  351. m.errorCounter.WithLabelValues("endpoints").Inc()
  352. return
  353. }
  354. m.mu.Lock()
  355. defer m.mu.Unlock()
  356. c := wireguard.Parse(conf)
  357. var key string
  358. var tmp *Peer
  359. for i := range c.Peers {
  360. // Peers are indexed by public key.
  361. key = string(c.Peers[i].PublicKey)
  362. if p, ok := m.peers[key]; ok {
  363. tmp = &Peer{
  364. Name: p.Name,
  365. Peer: *c.Peers[i],
  366. }
  367. if !peersAreEqual(tmp, p) {
  368. p.Endpoint = tmp.Endpoint
  369. if err := m.Peers().Set(p.Name, p); err != nil {
  370. level.Error(m.logger).Log("error", err)
  371. m.errorCounter.WithLabelValues("endpoints").Inc()
  372. }
  373. }
  374. }
  375. }
  376. }
  377. func (m *Mesh) syncNodes(e *NodeEvent) {
  378. logger := log.With(m.logger, "event", e.Type)
  379. level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
  380. if isSelf(m.hostname, e.Node) {
  381. level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
  382. m.handleLocal(e.Node)
  383. return
  384. }
  385. var diff bool
  386. m.mu.Lock()
  387. if !e.Node.Ready() {
  388. level.Debug(logger).Log("msg", "received incomplete node", "node", e.Node)
  389. // An existing node is no longer valid
  390. // so remove it from the mesh.
  391. if _, ok := m.nodes[e.Node.Name]; ok {
  392. level.Info(logger).Log("msg", "node is no longer ready", "node", e.Node)
  393. diff = true
  394. }
  395. } else {
  396. switch e.Type {
  397. case AddEvent:
  398. fallthrough
  399. case UpdateEvent:
  400. if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) {
  401. diff = true
  402. }
  403. // Even if the nodes are the same,
  404. // overwrite the old node to update the timestamp.
  405. m.nodes[e.Node.Name] = e.Node
  406. case DeleteEvent:
  407. delete(m.nodes, e.Node.Name)
  408. diff = true
  409. }
  410. }
  411. m.mu.Unlock()
  412. if diff {
  413. level.Info(logger).Log("node", e.Node)
  414. m.applyTopology()
  415. }
  416. }
  417. func (m *Mesh) syncPeers(e *PeerEvent) {
  418. logger := log.With(m.logger, "event", e.Type)
  419. level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
  420. var diff bool
  421. m.mu.Lock()
  422. // Peers are indexed by public key.
  423. key := string(e.Peer.PublicKey)
  424. if !e.Peer.Ready() {
  425. level.Debug(logger).Log("msg", "received incomplete peer", "peer", e.Peer)
  426. // An existing peer is no longer valid
  427. // so remove it from the mesh.
  428. if _, ok := m.peers[key]; ok {
  429. level.Info(logger).Log("msg", "peer is no longer ready", "peer", e.Peer)
  430. diff = true
  431. }
  432. } else {
  433. switch e.Type {
  434. case AddEvent:
  435. fallthrough
  436. case UpdateEvent:
  437. if e.Old != nil && key != string(e.Old.PublicKey) {
  438. delete(m.peers, string(e.Old.PublicKey))
  439. diff = true
  440. }
  441. if !peersAreEqual(m.peers[key], e.Peer) {
  442. m.peers[key] = e.Peer
  443. diff = true
  444. }
  445. case DeleteEvent:
  446. delete(m.peers, key)
  447. diff = true
  448. }
  449. }
  450. m.mu.Unlock()
  451. if diff {
  452. level.Info(logger).Log("peer", e.Peer)
  453. m.applyTopology()
  454. }
  455. }
  456. // checkIn will try to update the local node's LastSeen timestamp
  457. // in the backend.
  458. func (m *Mesh) checkIn() {
  459. m.mu.Lock()
  460. defer m.mu.Unlock()
  461. n := m.nodes[m.hostname]
  462. if n == nil {
  463. level.Debug(m.logger).Log("msg", "no local node found in backend")
  464. return
  465. }
  466. oldTime := n.LastSeen
  467. n.LastSeen = time.Now().Unix()
  468. if err := m.Nodes().Set(m.hostname, n); err != nil {
  469. level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
  470. m.errorCounter.WithLabelValues("checkin").Inc()
  471. // Revert time.
  472. n.LastSeen = oldTime
  473. return
  474. }
  475. level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
  476. }
  477. func (m *Mesh) handleLocal(n *Node) {
  478. // Allow the external IP to be overridden.
  479. if n.ExternalIP == nil {
  480. n.ExternalIP = m.externalIP
  481. }
  482. // Compare the given node to the calculated local node.
  483. // Take leader, location, and subnet from the argument, as these
  484. // are not determined by kilo.
  485. local := &Node{
  486. ExternalIP: n.ExternalIP,
  487. Key: m.pub,
  488. InternalIP: m.internalIP,
  489. LastSeen: time.Now().Unix(),
  490. Leader: n.Leader,
  491. Location: n.Location,
  492. Name: m.hostname,
  493. Subnet: n.Subnet,
  494. WireGuardIP: m.wireGuardIP,
  495. }
  496. if !nodesAreEqual(n, local) {
  497. level.Debug(m.logger).Log("msg", "local node differs from backend")
  498. if err := m.Nodes().Set(m.hostname, local); err != nil {
  499. level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
  500. m.errorCounter.WithLabelValues("local").Inc()
  501. return
  502. }
  503. level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
  504. }
  505. m.mu.Lock()
  506. n = m.nodes[m.hostname]
  507. if n == nil {
  508. n = &Node{}
  509. }
  510. m.mu.Unlock()
  511. if !nodesAreEqual(n, local) {
  512. m.mu.Lock()
  513. m.nodes[local.Name] = local
  514. m.mu.Unlock()
  515. m.applyTopology()
  516. }
  517. }
  518. func (m *Mesh) applyTopology() {
  519. m.reconcileCounter.Inc()
  520. m.mu.Lock()
  521. defer m.mu.Unlock()
  522. // Ensure only ready nodes are considered.
  523. nodes := make(map[string]*Node)
  524. var readyNodes float64
  525. for k := range m.nodes {
  526. if !m.nodes[k].Ready() {
  527. continue
  528. }
  529. nodes[k] = m.nodes[k]
  530. readyNodes++
  531. }
  532. // Ensure only ready nodes are considered.
  533. peers := make(map[string]*Peer)
  534. var readyPeers float64
  535. for k := range m.peers {
  536. if !m.peers[k].Ready() {
  537. continue
  538. }
  539. peers[k] = m.peers[k]
  540. readyPeers++
  541. }
  542. m.nodesGuage.Set(readyNodes)
  543. m.peersGuage.Set(readyPeers)
  544. // We cannot do anything with the topology until the local node is available.
  545. if nodes[m.hostname] == nil {
  546. return
  547. }
  548. t, err := NewTopology(nodes, peers, m.granularity, m.hostname, m.port, m.priv, m.subnet)
  549. if err != nil {
  550. level.Error(m.logger).Log("error", err)
  551. m.errorCounter.WithLabelValues("apply").Inc()
  552. return
  553. }
  554. // Update the node's WireGuard IP.
  555. m.wireGuardIP = t.wireGuardCIDR
  556. conf := t.Conf()
  557. buf, err := conf.Bytes()
  558. if err != nil {
  559. level.Error(m.logger).Log("error", err)
  560. m.errorCounter.WithLabelValues("apply").Inc()
  561. }
  562. if err := ioutil.WriteFile(ConfPath, buf, 0600); err != nil {
  563. level.Error(m.logger).Log("error", err)
  564. m.errorCounter.WithLabelValues("apply").Inc()
  565. return
  566. }
  567. rules := iptables.ForwardRules(m.subnet)
  568. var peerCIDRs []*net.IPNet
  569. for _, p := range peers {
  570. rules = append(rules, iptables.ForwardRules(p.AllowedIPs...)...)
  571. peerCIDRs = append(peerCIDRs, p.AllowedIPs...)
  572. }
  573. rules = append(rules, iptables.MasqueradeRules(m.subnet, oneAddressCIDR(t.privateIP.IP), nodes[m.hostname].Subnet, t.RemoteSubnets(), peerCIDRs)...)
  574. // If we are handling local routes, ensure the local
  575. // tunnel has an IP address and IPIP traffic is allowed.
  576. if m.enc.Strategy() != encapsulation.Never && m.local {
  577. var cidrs []*net.IPNet
  578. for _, s := range t.segments {
  579. if s.location == nodes[m.hostname].Location {
  580. for i := range s.privateIPs {
  581. cidrs = append(cidrs, oneAddressCIDR(s.privateIPs[i]))
  582. }
  583. break
  584. }
  585. }
  586. rules = append(rules, m.enc.Rules(cidrs)...)
  587. // If we are handling local routes, ensure the local
  588. // tunnel has an IP address.
  589. if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
  590. level.Error(m.logger).Log("error", err)
  591. m.errorCounter.WithLabelValues("apply").Inc()
  592. return
  593. }
  594. }
  595. if err := m.ipTables.Set(rules); err != nil {
  596. level.Error(m.logger).Log("error", err)
  597. m.errorCounter.WithLabelValues("apply").Inc()
  598. return
  599. }
  600. if t.leader {
  601. if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
  602. level.Error(m.logger).Log("error", err)
  603. m.errorCounter.WithLabelValues("apply").Inc()
  604. return
  605. }
  606. link, err := linkByIndex(m.kiloIface)
  607. if err != nil {
  608. level.Error(m.logger).Log("error", err)
  609. m.errorCounter.WithLabelValues("apply").Inc()
  610. return
  611. }
  612. oldConf, err := wireguard.ShowConf(link.Attrs().Name)
  613. if err != nil {
  614. level.Error(m.logger).Log("error", err)
  615. m.errorCounter.WithLabelValues("apply").Inc()
  616. return
  617. }
  618. // Setting the WireGuard configuration interrupts existing connections
  619. // so only set the configuration if it has changed.
  620. equal := conf.Equal(wireguard.Parse(oldConf))
  621. if !equal {
  622. level.Info(m.logger).Log("msg", "WireGuard configurations are different")
  623. if err := wireguard.SetConf(link.Attrs().Name, ConfPath); err != nil {
  624. level.Error(m.logger).Log("error", err)
  625. m.errorCounter.WithLabelValues("apply").Inc()
  626. return
  627. }
  628. }
  629. if err := iproute.Set(m.kiloIface, true); err != nil {
  630. level.Error(m.logger).Log("error", err)
  631. m.errorCounter.WithLabelValues("apply").Inc()
  632. return
  633. }
  634. } else {
  635. level.Debug(m.logger).Log("msg", "local node is not the leader")
  636. if err := iproute.Set(m.kiloIface, false); err != nil {
  637. level.Error(m.logger).Log("error", err)
  638. m.errorCounter.WithLabelValues("apply").Inc()
  639. return
  640. }
  641. }
  642. // We need to add routes last since they may depend
  643. // on the WireGuard interface.
  644. routes := t.Routes(m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc.Strategy())
  645. if err := m.table.Set(routes); err != nil {
  646. level.Error(m.logger).Log("error", err)
  647. m.errorCounter.WithLabelValues("apply").Inc()
  648. }
  649. }
  650. // RegisterMetrics registers Prometheus metrics on the given Prometheus
  651. // registerer.
  652. func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
  653. r.MustRegister(
  654. m.errorCounter,
  655. m.nodesGuage,
  656. m.peersGuage,
  657. m.reconcileCounter,
  658. )
  659. }
  660. // Stop stops the mesh.
  661. func (m *Mesh) Stop() {
  662. close(m.stop)
  663. }
  664. func (m *Mesh) cleanUp() {
  665. if err := m.ipTables.CleanUp(); err != nil {
  666. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
  667. m.errorCounter.WithLabelValues("cleanUp").Inc()
  668. }
  669. if err := m.table.CleanUp(); err != nil {
  670. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
  671. m.errorCounter.WithLabelValues("cleanUp").Inc()
  672. }
  673. if err := os.Remove(ConfPath); err != nil {
  674. level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
  675. m.errorCounter.WithLabelValues("cleanUp").Inc()
  676. }
  677. if err := iproute.RemoveInterface(m.kiloIface); err != nil {
  678. level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
  679. m.errorCounter.WithLabelValues("cleanUp").Inc()
  680. }
  681. if err := m.Nodes().CleanUp(m.hostname); err != nil {
  682. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
  683. m.errorCounter.WithLabelValues("cleanUp").Inc()
  684. }
  685. if err := m.Peers().CleanUp(m.hostname); err != nil {
  686. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
  687. m.errorCounter.WithLabelValues("cleanUp").Inc()
  688. }
  689. if err := m.enc.CleanUp(); err != nil {
  690. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulation: %v", err))
  691. m.errorCounter.WithLabelValues("cleanUp").Inc()
  692. }
  693. }
  694. func isSelf(hostname string, node *Node) bool {
  695. return node != nil && node.Name == hostname
  696. }
  697. func nodesAreEqual(a, b *Node) bool {
  698. if !(a != nil) == (b != nil) {
  699. return false
  700. }
  701. if a == b {
  702. return true
  703. }
  704. // Ignore LastSeen when comparing equality we want to check if the nodes are
  705. // equivalent. However, we do want to check if LastSeen has transitioned
  706. // between valid and invalid.
  707. return ipNetsEqual(a.ExternalIP, b.ExternalIP) && string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready()
  708. }
  709. func peersAreEqual(a, b *Peer) bool {
  710. if !(a != nil) == (b != nil) {
  711. return false
  712. }
  713. if a == b {
  714. return true
  715. }
  716. if !(a.Endpoint != nil) == (b.Endpoint != nil) {
  717. return false
  718. }
  719. if a.Endpoint != nil {
  720. if !a.Endpoint.IP.Equal(b.Endpoint.IP) || a.Endpoint.Port != b.Endpoint.Port {
  721. return false
  722. }
  723. }
  724. if len(a.AllowedIPs) != len(b.AllowedIPs) {
  725. return false
  726. }
  727. for i := range a.AllowedIPs {
  728. if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
  729. return false
  730. }
  731. }
  732. return string(a.PublicKey) == string(b.PublicKey) && a.PersistentKeepalive == b.PersistentKeepalive
  733. }
  734. func ipNetsEqual(a, b *net.IPNet) bool {
  735. if a == nil && b == nil {
  736. return true
  737. }
  738. if (a != nil) != (b != nil) {
  739. return false
  740. }
  741. if a.Mask.String() != b.Mask.String() {
  742. return false
  743. }
  744. return a.IP.Equal(b.IP)
  745. }
  746. func subnetsEqual(a, b *net.IPNet) bool {
  747. if a == nil && b == nil {
  748. return true
  749. }
  750. if (a != nil) != (b != nil) {
  751. return false
  752. }
  753. if a.Mask.String() != b.Mask.String() {
  754. return false
  755. }
  756. if !a.Contains(b.IP) {
  757. return false
  758. }
  759. if !b.Contains(a.IP) {
  760. return false
  761. }
  762. return true
  763. }
  764. func linkByIndex(index int) (netlink.Link, error) {
  765. link, err := netlink.LinkByIndex(index)
  766. if err != nil {
  767. return nil, fmt.Errorf("failed to get interface: %v", err)
  768. }
  769. return link, nil
  770. }