mesh.go
  1. // Copyright 2019 the Kilo authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package mesh
  15. import (
  16. "bytes"
  17. "fmt"
  18. "io/ioutil"
  19. "net"
  20. "os"
  21. "sync"
  22. "time"
  23. "github.com/go-kit/kit/log"
  24. "github.com/go-kit/kit/log/level"
  25. "github.com/prometheus/client_golang/prometheus"
  26. "github.com/vishvananda/netlink"
  27. "github.com/squat/kilo/pkg/encapsulation"
  28. "github.com/squat/kilo/pkg/iproute"
  29. "github.com/squat/kilo/pkg/iptables"
  30. "github.com/squat/kilo/pkg/route"
  31. "github.com/squat/kilo/pkg/wireguard"
  32. )
// resyncPeriod is how often the mesh checks in with the backend and
// re-applies the full topology, independent of watch events.
const resyncPeriod = 30 * time.Second

const (
	// KiloPath is the directory where Kilo stores its configuration.
	KiloPath = "/var/lib/kilo"
	// PrivateKeyPath is the filepath where the WireGuard private key is stored.
	PrivateKeyPath = KiloPath + "/key"
	// ConfPath is the filepath where the WireGuard configuration is stored.
	ConfPath = KiloPath + "/conf"
	// DefaultKiloInterface is the default interface created and used by Kilo.
	DefaultKiloInterface = "kilo0"
	// DefaultKiloPort is the default UDP port Kilo uses.
	DefaultKiloPort = 51820
	// DefaultCNIPath is the default path to the CNI config file.
	DefaultCNIPath = "/etc/cni/net.d/10-kilo.conflist"
)
// DefaultKiloSubnet is the default CIDR for Kilo.
var DefaultKiloSubnet = &net.IPNet{IP: []byte{10, 4, 0, 0}, Mask: []byte{255, 255, 0, 0}}

// Granularity represents the abstraction level at which the network
// should be meshed.
type Granularity string

const (
	// LogicalGranularity indicates that the network should create
	// a mesh between logical locations, e.g. data-centers, but not between
	// all nodes within a single location.
	LogicalGranularity Granularity = "location"
	// FullGranularity indicates that the network should create
	// a mesh between every node.
	FullGranularity Granularity = "full"
)
// Node represents a node in the network.
type Node struct {
	// Endpoint is the address at which the node's WireGuard
	// interface can be reached.
	Endpoint *wireguard.Endpoint
	// Key is the node's WireGuard public key.
	Key []byte
	// InternalIP is the node's private (cluster-internal) IP.
	InternalIP *net.IPNet
	// LastSeen is a Unix time for the last time
	// the node confirmed it was live.
	LastSeen int64
	// Leader is a suggestion to Kilo that
	// the node wants to lead its segment.
	Leader bool
	// Location groups nodes for LogicalGranularity meshing.
	Location string
	// Name uniquely identifies the node in the backend.
	Name string
	// PersistentKeepalive is the WireGuard keepalive interval in seconds.
	PersistentKeepalive int
	// Subnet is the pod subnet assigned to the node.
	Subnet *net.IPNet
	// WireGuardIP is the IP assigned to the node's WireGuard interface.
	WireGuardIP *net.IPNet
}
  79. // Ready indicates whether or not the node is ready.
  80. func (n *Node) Ready() bool {
  81. // Nodes that are not leaders will not have WireGuardIPs, so it is not required.
  82. return n != nil && n.Endpoint != nil && !(n.Endpoint.IP == nil && n.Endpoint.DNS == "") && n.Endpoint.Port != 0 && n.Key != nil && n.InternalIP != nil && n.Subnet != nil && time.Now().Unix()-n.LastSeen < int64(resyncPeriod)*2/int64(time.Second)
  83. }
// Peer represents a peer in the network.
type Peer struct {
	// Peer embeds the WireGuard-level peer configuration.
	wireguard.Peer
	// Name uniquely identifies the peer in the backend.
	Name string
}
  89. // Ready indicates whether or not the peer is ready.
  90. // Peers can have empty endpoints because they may not have an
  91. // IP, for example if they are behind a NAT, and thus
  92. // will not declare their endpoint and instead allow it to be
  93. // discovered.
  94. func (p *Peer) Ready() bool {
  95. return p != nil && p.AllowedIPs != nil && len(p.AllowedIPs) != 0 && p.PublicKey != nil
  96. }
// EventType describes what kind of an action an event represents.
type EventType string

const (
	// AddEvent represents an action where an item was added.
	AddEvent EventType = "add"
	// DeleteEvent represents an action where an item was removed.
	DeleteEvent EventType = "delete"
	// UpdateEvent represents an action where an item was updated.
	UpdateEvent EventType = "update"
)
// NodeEvent represents an event concerning a node in the cluster.
type NodeEvent struct {
	// Type is the kind of action the event represents.
	Type EventType
	// Node is the current state of the node.
	Node *Node
	// Old is the previous state of the node, if any.
	Old *Node
}

// PeerEvent represents an event concerning a peer in the cluster.
type PeerEvent struct {
	// Type is the kind of action the event represents.
	Type EventType
	// Peer is the current state of the peer.
	Peer *Peer
	// Old is the previous state of the peer, if any.
	Old *Peer
}
// Backend can create clients for all of the
// primitive types that Kilo deals with, namely:
// * nodes; and
// * peers.
type Backend interface {
	// Nodes returns the client for node resources.
	Nodes() NodeBackend
	// Peers returns the client for peer resources.
	Peers() PeerBackend
}
// NodeBackend can get nodes by name, init itself,
// list the nodes that should be meshed,
// set Kilo properties for a node,
// clean up any changes applied to the backend,
// and watch for changes to nodes.
type NodeBackend interface {
	// CleanUp removes any fields the backend set for the named node.
	CleanUp(string) error
	// Get returns the named node.
	Get(string) (*Node, error)
	// Init prepares the backend; the channel signals shutdown.
	Init(<-chan struct{}) error
	// List returns all nodes that should be meshed.
	List() ([]*Node, error)
	// Set writes the given node state under the given name.
	Set(string, *Node) error
	// Watch streams node events as they occur.
	Watch() <-chan *NodeEvent
}
// PeerBackend can get peers by name, init itself,
// list the peers that should be in the mesh,
// set fields for a peer,
// clean up any changes applied to the backend,
// and watch for changes to peers.
type PeerBackend interface {
	// CleanUp removes any fields the backend set for the named peer.
	CleanUp(string) error
	// Get returns the named peer.
	Get(string) (*Peer, error)
	// Init prepares the backend; the channel signals shutdown.
	Init(<-chan struct{}) error
	// List returns all peers that should be in the mesh.
	List() ([]*Peer, error)
	// Set writes the given peer state under the given name.
	Set(string, *Peer) error
	// Watch streams peer events as they occur.
	Watch() <-chan *PeerEvent
}
// Mesh is able to create Kilo network meshes.
type Mesh struct {
	Backend
	cleanUpIface bool
	cni          bool
	cniPath      string
	enc          encapsulation.Encapsulator
	externalIP   *net.IPNet
	granularity  Granularity
	hostname     string
	internalIP   *net.IPNet
	ipTables     *iptables.Controller
	kiloIface    int
	key          []byte
	local        bool
	port         uint32
	priv         []byte
	privIface    int
	pub          []byte
	stop         chan struct{}
	subnet       *net.IPNet
	table        *route.Table
	wireGuardIP  *net.IPNet

	// nodes and peers are mutable fields in the struct
	// and needs to be guarded.
	nodes map[string]*Node
	peers map[string]*Peer
	mu    sync.Mutex

	// NOTE(review): "Guage" is a typo for "Gauge"; renaming the fields
	// would also touch RegisterMetrics and applyTopology, so it is left as is.
	errorCounter     *prometheus.CounterVec
	nodesGuage       prometheus.Gauge
	peersGuage       prometheus.Gauge
	reconcileCounter prometheus.Counter
	logger           log.Logger
}
  187. // New returns a new Mesh instance.
  188. func New(backend Backend, enc encapsulation.Encapsulator, granularity Granularity, hostname string, port uint32, subnet *net.IPNet, local, cni bool, cniPath, iface string, cleanUpIface bool, logger log.Logger) (*Mesh, error) {
  189. if err := os.MkdirAll(KiloPath, 0700); err != nil {
  190. return nil, fmt.Errorf("failed to create directory to store configuration: %v", err)
  191. }
  192. private, err := ioutil.ReadFile(PrivateKeyPath)
  193. private = bytes.Trim(private, "\n")
  194. if err != nil {
  195. level.Warn(logger).Log("msg", "no private key found on disk; generating one now")
  196. if private, err = wireguard.GenKey(); err != nil {
  197. return nil, err
  198. }
  199. }
  200. public, err := wireguard.PubKey(private)
  201. if err != nil {
  202. return nil, err
  203. }
  204. if err := ioutil.WriteFile(PrivateKeyPath, private, 0600); err != nil {
  205. return nil, fmt.Errorf("failed to write private key to disk: %v", err)
  206. }
  207. cniIndex, err := cniDeviceIndex()
  208. if err != nil {
  209. return nil, fmt.Errorf("failed to query netlink for CNI device: %v", err)
  210. }
  211. privateIP, publicIP, err := getIP(hostname, enc.Index(), cniIndex)
  212. if err != nil {
  213. return nil, fmt.Errorf("failed to find public IP: %v", err)
  214. }
  215. ifaces, err := interfacesForIP(privateIP)
  216. if err != nil {
  217. return nil, fmt.Errorf("failed to find interface for private IP: %v", err)
  218. }
  219. privIface := ifaces[0].Index
  220. kiloIface, _, err := wireguard.New(iface)
  221. if err != nil {
  222. return nil, fmt.Errorf("failed to create WireGuard interface: %v", err)
  223. }
  224. if enc.Strategy() != encapsulation.Never {
  225. if err := enc.Init(privIface); err != nil {
  226. return nil, fmt.Errorf("failed to initialize encapsulator: %v", err)
  227. }
  228. }
  229. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String()))
  230. level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String()))
  231. ipTables, err := iptables.New(len(subnet.IP))
  232. if err != nil {
  233. return nil, fmt.Errorf("failed to IP tables controller: %v", err)
  234. }
  235. return &Mesh{
  236. Backend: backend,
  237. cleanUpIface: cleanUpIface,
  238. cni: cni,
  239. cniPath: cniPath,
  240. enc: enc,
  241. externalIP: publicIP,
  242. granularity: granularity,
  243. hostname: hostname,
  244. internalIP: privateIP,
  245. ipTables: ipTables,
  246. kiloIface: kiloIface,
  247. nodes: make(map[string]*Node),
  248. peers: make(map[string]*Peer),
  249. port: port,
  250. priv: private,
  251. privIface: privIface,
  252. pub: public,
  253. local: local,
  254. stop: make(chan struct{}),
  255. subnet: subnet,
  256. table: route.NewTable(),
  257. errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{
  258. Name: "kilo_errors_total",
  259. Help: "Number of errors that occurred while administering the mesh.",
  260. }, []string{"event"}),
  261. nodesGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  262. Name: "kilo_nodes",
  263. Help: "Number of nodes in the mesh.",
  264. }),
  265. peersGuage: prometheus.NewGauge(prometheus.GaugeOpts{
  266. Name: "kilo_peers",
  267. Help: "Number of peers in the mesh.",
  268. }),
  269. reconcileCounter: prometheus.NewCounter(prometheus.CounterOpts{
  270. Name: "kilo_reconciles_total",
  271. Help: "Number of reconciliation attempts.",
  272. }),
  273. logger: logger,
  274. }, nil
  275. }
// Run starts the mesh.
// It initializes both backends, starts the IP tables and route table
// controllers, forwards their errors to the logger, and then loops,
// reacting to node and peer events and periodically resynchronizing,
// until Stop closes the stop channel.
func (m *Mesh) Run() error {
	if err := m.Nodes().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize node backend: %v", err)
	}
	// Try to set the CNI config quickly.
	if n, err := m.Nodes().Get(m.hostname); err == nil {
		if n != nil && n.Subnet != nil {
			m.nodes[m.hostname] = n
			m.updateCNIConfig()
		}
	}
	if err := m.Peers().Init(m.stop); err != nil {
		return fmt.Errorf("failed to initialize peer backend: %v", err)
	}
	ipTablesErrors, err := m.ipTables.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for IP tables updates: %v", err)
	}
	routeErrors, err := m.table.Run(m.stop)
	if err != nil {
		return fmt.Errorf("failed to watch for route table updates: %v", err)
	}
	// Drain controller error channels in the background so they never block;
	// the goroutine exits when the stop channel is closed.
	go func() {
		for {
			var err error
			select {
			case err = <-ipTablesErrors:
			case err = <-routeErrors:
			case <-m.stop:
				return
			}
			if err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("run").Inc()
			}
		}
	}()
	defer m.cleanUp()
	// Reuse one timer for the resync tick instead of allocating per loop.
	t := time.NewTimer(resyncPeriod)
	nw := m.Nodes().Watch()
	pw := m.Peers().Watch()
	var ne *NodeEvent
	var pe *PeerEvent
	for {
		select {
		case ne = <-nw:
			m.syncNodes(ne)
		case pe = <-pw:
			m.syncPeers(pe)
		case <-t.C:
			// Periodic resync: check in, refresh CNI config if managed,
			// push observed endpoints back to the backend, and re-apply topology.
			m.checkIn()
			if m.cni {
				m.updateCNIConfig()
			}
			m.syncEndpoints()
			m.applyTopology()
			t.Reset(resyncPeriod)
		case <-m.stop:
			return nil
		}
	}
}
  339. // WireGuard updates the endpoints of peers to match the
  340. // last place a valid packet was received from.
  341. // Periodically we need to syncronize the endpoints
  342. // of peers in the backend to match the WireGuard configuration.
  343. func (m *Mesh) syncEndpoints() {
  344. link, err := linkByIndex(m.kiloIface)
  345. if err != nil {
  346. level.Error(m.logger).Log("error", err)
  347. m.errorCounter.WithLabelValues("endpoints").Inc()
  348. return
  349. }
  350. conf, err := wireguard.ShowConf(link.Attrs().Name)
  351. if err != nil {
  352. level.Error(m.logger).Log("error", err)
  353. m.errorCounter.WithLabelValues("endpoints").Inc()
  354. return
  355. }
  356. m.mu.Lock()
  357. defer m.mu.Unlock()
  358. c := wireguard.Parse(conf)
  359. var key string
  360. var tmp *Peer
  361. for i := range c.Peers {
  362. // Peers are indexed by public key.
  363. key = string(c.Peers[i].PublicKey)
  364. if p, ok := m.peers[key]; ok {
  365. tmp = &Peer{
  366. Name: p.Name,
  367. Peer: *c.Peers[i],
  368. }
  369. if !peersAreEqual(tmp, p) {
  370. p.Endpoint = tmp.Endpoint
  371. if err := m.Peers().Set(p.Name, p); err != nil {
  372. level.Error(m.logger).Log("error", err)
  373. m.errorCounter.WithLabelValues("endpoints").Inc()
  374. }
  375. }
  376. }
  377. }
  378. }
// syncNodes processes a single node event from the backend watch,
// updating the in-memory node map and re-applying the topology when
// the set of ready nodes actually changed.
func (m *Mesh) syncNodes(e *NodeEvent) {
	logger := log.With(m.logger, "event", e.Type)
	level.Debug(logger).Log("msg", "syncing nodes", "event", e.Type)
	// Events about this host are handled separately, since the local
	// node's state is reconciled against the backend rather than cached.
	if isSelf(m.hostname, e.Node) {
		level.Debug(logger).Log("msg", "processing local node", "node", e.Node)
		m.handleLocal(e.Node)
		return
	}
	var diff bool
	m.mu.Lock()
	if !e.Node.Ready() {
		level.Debug(logger).Log("msg", "received incomplete node", "node", e.Node)
		// An existing node is no longer valid
		// so remove it from the mesh.
		if _, ok := m.nodes[e.Node.Name]; ok {
			level.Info(logger).Log("msg", "node is no longer ready", "node", e.Node)
			diff = true
		}
	} else {
		switch e.Type {
		case AddEvent:
			fallthrough
		case UpdateEvent:
			if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) {
				diff = true
			}
			// Even if the nodes are the same,
			// overwrite the old node to update the timestamp.
			m.nodes[e.Node.Name] = e.Node
		case DeleteEvent:
			delete(m.nodes, e.Node.Name)
			diff = true
		}
	}
	// Unlock before applyTopology, which takes the lock itself.
	m.mu.Unlock()
	if diff {
		level.Info(logger).Log("node", e.Node)
		m.applyTopology()
	}
}
// syncPeers processes a single peer event from the backend watch,
// updating the in-memory peer map and re-applying the topology when
// the set of ready peers actually changed.
func (m *Mesh) syncPeers(e *PeerEvent) {
	logger := log.With(m.logger, "event", e.Type)
	level.Debug(logger).Log("msg", "syncing peers", "event", e.Type)
	var diff bool
	m.mu.Lock()
	// Peers are indexed by public key.
	key := string(e.Peer.PublicKey)
	if !e.Peer.Ready() {
		level.Debug(logger).Log("msg", "received incomplete peer", "peer", e.Peer)
		// An existing peer is no longer valid
		// so remove it from the mesh.
		if _, ok := m.peers[key]; ok {
			level.Info(logger).Log("msg", "peer is no longer ready", "peer", e.Peer)
			diff = true
		}
	} else {
		switch e.Type {
		case AddEvent:
			fallthrough
		case UpdateEvent:
			// If the peer's public key changed, drop the entry stored
			// under the old key before inserting the new one.
			if e.Old != nil && key != string(e.Old.PublicKey) {
				delete(m.peers, string(e.Old.PublicKey))
				diff = true
			}
			if !peersAreEqual(m.peers[key], e.Peer) {
				m.peers[key] = e.Peer
				diff = true
			}
		case DeleteEvent:
			delete(m.peers, key)
			diff = true
		}
	}
	// Unlock before applyTopology, which takes the lock itself.
	m.mu.Unlock()
	if diff {
		level.Info(logger).Log("peer", e.Peer)
		m.applyTopology()
	}
}
  458. // checkIn will try to update the local node's LastSeen timestamp
  459. // in the backend.
  460. func (m *Mesh) checkIn() {
  461. m.mu.Lock()
  462. defer m.mu.Unlock()
  463. n := m.nodes[m.hostname]
  464. if n == nil {
  465. level.Debug(m.logger).Log("msg", "no local node found in backend")
  466. return
  467. }
  468. oldTime := n.LastSeen
  469. n.LastSeen = time.Now().Unix()
  470. if err := m.Nodes().Set(m.hostname, n); err != nil {
  471. level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", n)
  472. m.errorCounter.WithLabelValues("checkin").Inc()
  473. // Revert time.
  474. n.LastSeen = oldTime
  475. return
  476. }
  477. level.Debug(m.logger).Log("msg", "successfully checked in local node in backend")
  478. }
// handleLocal reconciles the local node's state with the backend.
// It fills in any missing endpoint or internal IP from locally discovered
// values, writes the computed state back to the backend if it differs,
// and re-applies the topology if the cached local node is out of date.
func (m *Mesh) handleLocal(n *Node) {
	// Allow the IPs to be overridden.
	if n.Endpoint == nil || (n.Endpoint.DNS == "" && n.Endpoint.IP == nil) {
		n.Endpoint = &wireguard.Endpoint{DNSOrIP: wireguard.DNSOrIP{IP: m.externalIP.IP}, Port: m.port}
	}
	if n.InternalIP == nil {
		n.InternalIP = m.internalIP
	}
	// Compare the given node to the calculated local node.
	// Take leader, location, and subnet from the argument, as these
	// are not determined by kilo.
	local := &Node{
		Endpoint:    n.Endpoint,
		Key:         m.pub,
		InternalIP:  n.InternalIP,
		LastSeen:    time.Now().Unix(),
		Leader:      n.Leader,
		Location:    n.Location,
		Name:        m.hostname,
		Subnet:      n.Subnet,
		WireGuardIP: m.wireGuardIP,
	}
	if !nodesAreEqual(n, local) {
		level.Debug(m.logger).Log("msg", "local node differs from backend")
		if err := m.Nodes().Set(m.hostname, local); err != nil {
			level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local)
			m.errorCounter.WithLabelValues("local").Inc()
			return
		}
		level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend")
	}
	// Second comparison: check the cached copy of the local node so the
	// topology is only re-applied when the cache was actually stale.
	m.mu.Lock()
	n = m.nodes[m.hostname]
	if n == nil {
		n = &Node{}
	}
	m.mu.Unlock()
	if !nodesAreEqual(n, local) {
		m.mu.Lock()
		m.nodes[local.Name] = local
		m.mu.Unlock()
		m.applyTopology()
	}
}
// applyTopology recomputes the mesh topology from the ready nodes and peers
// and pushes it to every subsystem: the WireGuard configuration file and
// device, IP tables rules, the interface address/state, and the route table.
// Errors are logged and counted but do not abort the caller; the next
// resync retries. Callers must NOT hold m.mu.
func (m *Mesh) applyTopology() {
	m.reconcileCounter.Inc()
	m.mu.Lock()
	defer m.mu.Unlock()
	// If we can't resolve an endpoint, then fail and retry later.
	if err := m.resolveEndpoints(); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Ensure only ready nodes are considered.
	nodes := make(map[string]*Node)
	var readyNodes float64
	for k := range m.nodes {
		if !m.nodes[k].Ready() {
			continue
		}
		nodes[k] = m.nodes[k]
		readyNodes++
	}
	// Ensure only ready peers are considered.
	peers := make(map[string]*Peer)
	var readyPeers float64
	for k := range m.peers {
		if !m.peers[k].Ready() {
			continue
		}
		peers[k] = m.peers[k]
		readyPeers++
	}
	m.nodesGuage.Set(readyNodes)
	m.peersGuage.Set(readyPeers)
	// We cannot do anything with the topology until the local node is available.
	if nodes[m.hostname] == nil {
		return
	}
	t, err := NewTopology(nodes, peers, m.granularity, m.hostname, nodes[m.hostname].Endpoint.Port, m.priv, m.subnet)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Update the node's WireGuard IP.
	m.wireGuardIP = t.wireGuardCIDR
	conf := t.Conf()
	buf, err := conf.Bytes()
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Persist the rendered WireGuard configuration for wg setconf.
	if err := ioutil.WriteFile(ConfPath, buf, 0600); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	ipRules := iptables.ForwardRules(m.subnet)
	// If we are handling local routes, ensure the local
	// tunnel has an IP address and IPIP traffic is allowed.
	if m.enc.Strategy() != encapsulation.Never && m.local {
		var cidrs []*net.IPNet
		// Collect the private IPs of the nodes in this host's location.
		for _, s := range t.segments {
			if s.location == nodes[m.hostname].Location {
				for i := range s.privateIPs {
					cidrs = append(cidrs, oneAddressCIDR(s.privateIPs[i]))
				}
				break
			}
		}
		ipRules = append(ipRules, m.enc.Rules(cidrs)...)
		// If we are handling local routes, ensure the local
		// tunnel has an IP address.
		if err := m.enc.Set(oneAddressCIDR(newAllocator(*nodes[m.hostname].Subnet).next().IP)); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	if err := m.ipTables.Set(ipRules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	// Find the Kilo interface name.
	link, err := linkByIndex(m.kiloIface)
	if err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
		return
	}
	if t.leader {
		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
		oldConf, err := wireguard.ShowConf(link.Attrs().Name)
		if err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
		// Setting the WireGuard configuration interrupts existing connections
		// so only set the configuration if it has changed.
		equal := conf.Equal(wireguard.Parse(oldConf))
		if !equal {
			level.Info(m.logger).Log("msg", "WireGuard configurations are different")
			if err := wireguard.SetConf(link.Attrs().Name, ConfPath); err != nil {
				level.Error(m.logger).Log("error", err)
				m.errorCounter.WithLabelValues("apply").Inc()
				return
			}
		}
		if err := iproute.Set(m.kiloIface, true); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	} else {
		// Non-leaders keep the Kilo interface down; traffic is routed
		// through the segment leader instead.
		level.Debug(m.logger).Log("msg", "local node is not the leader")
		if err := iproute.Set(m.kiloIface, false); err != nil {
			level.Error(m.logger).Log("error", err)
			m.errorCounter.WithLabelValues("apply").Inc()
			return
		}
	}
	// We need to add routes last since they may depend
	// on the WireGuard interface.
	routes, rules := t.Routes(link.Attrs().Name, m.kiloIface, m.privIface, m.enc.Index(), m.local, m.enc)
	if err := m.table.Set(routes, rules); err != nil {
		level.Error(m.logger).Log("error", err)
		m.errorCounter.WithLabelValues("apply").Inc()
	}
}
  657. // RegisterMetrics registers Prometheus metrics on the given Prometheus
  658. // registerer.
  659. func (m *Mesh) RegisterMetrics(r prometheus.Registerer) {
  660. r.MustRegister(
  661. m.errorCounter,
  662. m.nodesGuage,
  663. m.peersGuage,
  664. m.reconcileCounter,
  665. )
  666. }
// Stop stops the mesh.
func (m *Mesh) Stop() {
	// Closing the stop channel signals every goroutine and watcher
	// started by Run to shut down.
	close(m.stop)
}
  671. func (m *Mesh) cleanUp() {
  672. if err := m.ipTables.CleanUp(); err != nil {
  673. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err))
  674. m.errorCounter.WithLabelValues("cleanUp").Inc()
  675. }
  676. if err := m.table.CleanUp(); err != nil {
  677. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err))
  678. m.errorCounter.WithLabelValues("cleanUp").Inc()
  679. }
  680. if err := os.Remove(ConfPath); err != nil {
  681. level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err))
  682. m.errorCounter.WithLabelValues("cleanUp").Inc()
  683. }
  684. if m.cleanUpIface {
  685. if err := iproute.RemoveInterface(m.kiloIface); err != nil {
  686. level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove WireGuard interface: %v", err))
  687. m.errorCounter.WithLabelValues("cleanUp").Inc()
  688. }
  689. }
  690. if err := m.Nodes().CleanUp(m.hostname); err != nil {
  691. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up node backend: %v", err))
  692. m.errorCounter.WithLabelValues("cleanUp").Inc()
  693. }
  694. if err := m.Peers().CleanUp(m.hostname); err != nil {
  695. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up peer backend: %v", err))
  696. m.errorCounter.WithLabelValues("cleanUp").Inc()
  697. }
  698. if err := m.enc.CleanUp(); err != nil {
  699. level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up encapsulator: %v", err))
  700. m.errorCounter.WithLabelValues("cleanUp").Inc()
  701. }
  702. }
  703. func (m *Mesh) resolveEndpoints() error {
  704. for k := range m.nodes {
  705. // Skip unready nodes, since they will not be used
  706. // in the topology anyways.
  707. if !m.nodes[k].Ready() {
  708. continue
  709. }
  710. // If the node is ready, then the endpoint is not nil
  711. // but it may not have a DNS name.
  712. if m.nodes[k].Endpoint.DNS == "" {
  713. continue
  714. }
  715. if err := resolveEndpoint(m.nodes[k].Endpoint); err != nil {
  716. return err
  717. }
  718. }
  719. for k := range m.peers {
  720. // Skip unready peers, since they will not be used
  721. // in the topology anyways.
  722. if !m.peers[k].Ready() {
  723. continue
  724. }
  725. // If the peer is ready, then the endpoint is not nil
  726. // but it may not have a DNS name.
  727. if m.peers[k].Endpoint.DNS == "" {
  728. continue
  729. }
  730. if err := resolveEndpoint(m.peers[k].Endpoint); err != nil {
  731. return err
  732. }
  733. }
  734. return nil
  735. }
  736. func resolveEndpoint(endpoint *wireguard.Endpoint) error {
  737. ips, err := net.LookupIP(endpoint.DNS)
  738. if err != nil {
  739. return fmt.Errorf("failed to look up DNS name %q: %v", endpoint.DNS, err)
  740. }
  741. nets := make([]*net.IPNet, len(ips), len(ips))
  742. for i := range ips {
  743. nets[i] = oneAddressCIDR(ips[i])
  744. }
  745. sortIPs(nets)
  746. if len(nets) == 0 {
  747. return fmt.Errorf("did not find any addresses for DNS name %q", endpoint.DNS)
  748. }
  749. endpoint.IP = nets[0].IP
  750. return nil
  751. }
  752. func isSelf(hostname string, node *Node) bool {
  753. return node != nil && node.Name == hostname
  754. }
  755. func nodesAreEqual(a, b *Node) bool {
  756. if !(a != nil) == (b != nil) {
  757. return false
  758. }
  759. if a == b {
  760. return true
  761. }
  762. if !(a.Endpoint != nil) == (b.Endpoint != nil) {
  763. return false
  764. }
  765. if a.Endpoint != nil {
  766. if a.Endpoint.Port != b.Endpoint.Port {
  767. return false
  768. }
  769. // Check the DNS name first since this package
  770. // is doing the DNS resolution.
  771. if a.Endpoint.DNS != b.Endpoint.DNS {
  772. return false
  773. }
  774. if a.Endpoint.DNS == "" && !a.Endpoint.IP.Equal(b.Endpoint.IP) {
  775. return false
  776. }
  777. }
  778. // Ignore LastSeen when comparing equality we want to check if the nodes are
  779. // equivalent. However, we do want to check if LastSeen has transitioned
  780. // between valid and invalid.
  781. return string(a.Key) == string(b.Key) && ipNetsEqual(a.WireGuardIP, b.WireGuardIP) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) && a.Ready() == b.Ready()
  782. }
  783. func peersAreEqual(a, b *Peer) bool {
  784. if !(a != nil) == (b != nil) {
  785. return false
  786. }
  787. if a == b {
  788. return true
  789. }
  790. if !(a.Endpoint != nil) == (b.Endpoint != nil) {
  791. return false
  792. }
  793. if a.Endpoint != nil {
  794. if a.Endpoint.Port != b.Endpoint.Port {
  795. return false
  796. }
  797. // Check the DNS name first since this package
  798. // is doing the DNS resolution.
  799. if a.Endpoint.DNS != b.Endpoint.DNS {
  800. return false
  801. }
  802. if a.Endpoint.DNS == "" && !a.Endpoint.IP.Equal(b.Endpoint.IP) {
  803. return false
  804. }
  805. }
  806. if len(a.AllowedIPs) != len(b.AllowedIPs) {
  807. return false
  808. }
  809. for i := range a.AllowedIPs {
  810. if !ipNetsEqual(a.AllowedIPs[i], b.AllowedIPs[i]) {
  811. return false
  812. }
  813. }
  814. return string(a.PublicKey) == string(b.PublicKey) && a.PersistentKeepalive == b.PersistentKeepalive
  815. }
  816. func ipNetsEqual(a, b *net.IPNet) bool {
  817. if a == nil && b == nil {
  818. return true
  819. }
  820. if (a != nil) != (b != nil) {
  821. return false
  822. }
  823. if a.Mask.String() != b.Mask.String() {
  824. return false
  825. }
  826. return a.IP.Equal(b.IP)
  827. }
  828. func subnetsEqual(a, b *net.IPNet) bool {
  829. if a == nil && b == nil {
  830. return true
  831. }
  832. if (a != nil) != (b != nil) {
  833. return false
  834. }
  835. if a.Mask.String() != b.Mask.String() {
  836. return false
  837. }
  838. if !a.Contains(b.IP) {
  839. return false
  840. }
  841. if !b.Contains(a.IP) {
  842. return false
  843. }
  844. return true
  845. }
// linkByIndex returns the netlink link for the given interface index,
// wrapping any lookup error with context.
func linkByIndex(index int) (netlink.Link, error) {
	link, err := netlink.LinkByIndex(index)
	if err != nil {
		return nil, fmt.Errorf("failed to get interface: %v", err)
	}
	return link, nil
}