/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              IPv4 Forwarding Information Base: semantics.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH
u32 fib_multipath_secret __read_mostly;

#define for_nexthops(fi) { \
        int nhsel; const struct fib_nh *nh; \
        for (nhsel = 0, nh = (fi)->fib_nh; \
             nhsel < (fi)->fib_nhs; \
             nh++, nhsel++)

#define change_nexthops(fi) { \
        int nhsel; struct fib_nh *nexthop_nh; \
        for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
             nhsel < (fi)->fib_nhs; \
             nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize away the dummy loop */

#define for_nexthops(fi) { \
        int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
        for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) { \
        int nhsel; \
        struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
        for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }


const struct fib_prop fib_props[RTN_MAX + 1] = {
        [RTN_UNSPEC] = {
                .error = 0,
                .scope = RT_SCOPE_NOWHERE,
        },
        [RTN_UNICAST] = {
                .error = 0,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_LOCAL] = {
                .error = 0,
                .scope = RT_SCOPE_HOST,
        },
        [RTN_BROADCAST] = {
                .error = 0,
                .scope = RT_SCOPE_LINK,
        },
        [RTN_ANYCAST] = {
                .error = 0,
                .scope = RT_SCOPE_LINK,
        },
        [RTN_MULTICAST] = {
                .error = 0,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_BLACKHOLE] = {
                .error = -EINVAL,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_UNREACHABLE] = {
                .error = -EHOSTUNREACH,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_PROHIBIT] = {
                .error = -EACCES,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_THROW] = {
                .error = -EAGAIN,
                .scope = RT_SCOPE_UNIVERSE,
        },
        [RTN_NAT] = {
                .error = -EINVAL,
                .scope = RT_SCOPE_NOWHERE,
        },
        [RTN_XRESOLVE] = {
                .error = -EINVAL,
                .scope = RT_SCOPE_NOWHERE,
        },
};

static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
        struct rtable *rt = rcu_dereference_protected(*rtp, 1);

        if (!rt)
                return;

        /* Not even needed: RCU_INIT_POINTER(*rtp, NULL);
         * because we waited an RCU grace period before calling
         * free_fib_info_rcu()
         */

        dst_free(&rt->dst);
}

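/* Free the nexthop exception hash of a fib_nh and the cached routes
 * hanging off each exception.  Runs after an RCU grace period (see
 * free_fib_info_rcu()), which is why rcu_dereference_protected() is
 * used to walk the chains directly.
 */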
static void free_nh_exceptions(struct fib_nh *nh)
{
        struct fnhe_hash_bucket *hash;
        int i;

        hash = rcu_dereference_protected(nh->nh_exceptions, 1);
        if (!hash)
                return;
        for (i = 0; i < FNHE_HASH_SIZE; i++) {
                struct fib_nh_exception *fnhe;

                fnhe = rcu_dereference_protected(hash[i].chain, 1);
                while (fnhe) {
                        struct fib_nh_exception *next;

                        next = rcu_dereference_protected(fnhe->fnhe_next, 1);

                        rt_fibinfo_free(&fnhe->fnhe_rth_input);
                        rt_fibinfo_free(&fnhe->fnhe_rth_output);

                        kfree(fnhe);

                        fnhe = next;
                }
        }
        kfree(hash);
}

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
        int cpu;

        if (!rtp)
                return;

        for_each_possible_cpu(cpu) {
                struct rtable *rt;

                rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
                if (rt)
                        dst_free(&rt->dst);
        }
        free_percpu(rtp);
}

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
        struct fib_info *fi = container_of(head, struct fib_info, rcu);
        struct dst_metrics *m;

        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
                        dev_put(nexthop_nh->nh_dev);
                lwtstate_put(nexthop_nh->nh_lwtstate);
                free_nh_exceptions(nexthop_nh);
                rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
        } endfor_nexthops(fi);

        m = fi->fib_metrics;
        if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
                kfree(m);
        kfree(fi);
}

void free_fib_info(struct fib_info *fi)
{
        if (fi->fib_dead == 0) {
                pr_warn("Freeing alive fib_info %p\n", fi);
                return;
        }
        fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
        change_nexthops(fi) {
                if (nexthop_nh->nh_tclassid)
                        fi->fib_net->ipv4.fib_num_tclassid_users--;
        } endfor_nexthops(fi);
#endif
        call_rcu(&fi->rcu, free_fib_info_rcu);
}

void fib_release_info(struct fib_info *fi)
{
        spin_lock_bh(&fib_info_lock);
        if (fi && --fi->fib_treeref == 0) {
                hlist_del(&fi->fib_hash);
                if (fi->fib_prefsrc)
                        hlist_del(&fi->fib_lhash);
                change_nexthops(fi) {
                        if (!nexthop_nh->nh_dev)
                                continue;
                        hlist_del(&nexthop_nh->nh_hash);
                } endfor_nexthops(fi)
                fi->fib_dead = 1;
                fib_info_put(fi);
        }
        spin_unlock_bh(&fib_info_lock);
}

static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
        const struct fib_nh *onh = ofi->fib_nh;

        for_nexthops(fi) {
                if (nh->nh_oif != onh->nh_oif ||
                    nh->nh_gw != onh->nh_gw ||
                    nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                    nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
#endif
                    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
                        return -1;
                onh++;
        } endfor_nexthops(fi);
        return 0;
}

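/* Fold an interface index into a DEVINDEX_HASHBITS-wide hash for
 * fib_info_devhash.
 */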
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
        unsigned int mask = DEVINDEX_HASHSIZE - 1;

        return (val ^
                (val >> DEVINDEX_HASHBITS) ^
                (val >> (DEVINDEX_HASHBITS * 2))) & mask;
}

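/* Hash a fib_info over a subset of the fields that fib_find_info()
 * compares: nexthop count, protocol, scope, preferred source,
 * priority and the nexthop interface indices.
 */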
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
        unsigned int mask = (fib_info_hash_size - 1);
        unsigned int val = fi->fib_nhs;

        val ^= (fi->fib_protocol << 8) | fi->fib_scope;
        val ^= (__force u32)fi->fib_prefsrc;
        val ^= fi->fib_priority;
        for_nexthops(fi) {
                val ^= fib_devindex_hashfn(nh->nh_oif);
        } endfor_nexthops(fi)

        return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}

static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
        struct hlist_head *head;
        struct fib_info *fi;
        unsigned int hash;

        hash = fib_info_hashfn(nfi);
        head = &fib_info_hash[hash];

        hlist_for_each_entry(fi, head, fib_hash) {
                if (!net_eq(fi->fib_net, nfi->fib_net))
                        continue;
                if (fi->fib_nhs != nfi->fib_nhs)
                        continue;
                if (nfi->fib_protocol == fi->fib_protocol &&
                    nfi->fib_scope == fi->fib_scope &&
                    nfi->fib_prefsrc == fi->fib_prefsrc &&
                    nfi->fib_priority == fi->fib_priority &&
                    nfi->fib_type == fi->fib_type &&
                    memcmp(nfi->fib_metrics, fi->fib_metrics,
                           sizeof(u32) * RTAX_MAX) == 0 &&
                    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
                    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
                        return fi;
        }

        return NULL;
}

/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
        struct hlist_head *head;
        struct fib_nh *nh;
        unsigned int hash;

        spin_lock(&fib_info_lock);

        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
        hlist_for_each_entry(nh, head, nh_hash) {
                if (nh->nh_dev == dev &&
                    nh->nh_gw == gw &&
                    !(nh->nh_flags & RTNH_F_DEAD)) {
                        spin_unlock(&fib_info_lock);
                        return 0;
                }
        }

        spin_unlock(&fib_info_lock);

        return -1;
}

static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
                         + nla_total_size(4) /* RTA_TABLE */
                         + nla_total_size(4) /* RTA_DST */
                         + nla_total_size(4) /* RTA_PRIORITY */
                         + nla_total_size(4) /* RTA_PREFSRC */
                         + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

        /* space for nested metrics */
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

        if (fi->fib_nhs) {
                size_t nh_encapsize = 0;
                /* Also handles the special case fib_nhs == 1 */

                /* each nexthop is packed in an attribute */
                size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

                /* may contain flow and gateway attribute */
                nhsize += 2 * nla_total_size(4);

                /* grab encap info */
                for_nexthops(fi) {
                        if (nh->nh_lwtstate) {
                                /* RTA_ENCAP_TYPE */
                                nh_encapsize += lwtunnel_get_encap_size(
                                                nh->nh_lwtstate);
                                /* RTA_ENCAP */
                                nh_encapsize += nla_total_size(2);
                        }
                } endfor_nexthops(fi);

                /* all nexthops are packed in a nested attribute */
                payload += nla_total_size((fi->fib_nhs * nhsize) +
                                          nh_encapsize);

        }

        return payload;
}

void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
               int dst_len, u32 tb_id, const struct nl_info *info,
               unsigned int nlm_flags)
{
        struct sk_buff *skb;
        u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
        int err = -ENOBUFS;

        skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
        if (!skb)
                goto errout;

        err = fib_dump_info(skb, info->portid, seq, event, tb_id,
                            fa->fa_type, key, dst_len,
                            fa->fa_tos, fa->fa_info, nlm_flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
                    info->nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}

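/* Judge whether the gateway of fi's first nexthop is dead, based on
 * its ARP neighbour state.  Returns 0 if the nexthop still looks
 * usable, 1 otherwise; a plausible "last resort" candidate is
 * remembered via *last_resort / *last_idx for fib_select_default().
 */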
static int fib_detect_death(struct fib_info *fi, int order,
                            struct fib_info **last_resort, int *last_idx,
                            int dflt)
{
        struct neighbour *n;
        int state = NUD_NONE;

        n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
        if (n) {
                state = n->nud_state;
                neigh_release(n);
        } else {
                return 0;
        }
        if (state == NUD_REACHABLE)
                return 0;
        if ((state & NUD_VALID) && order != dflt)
                return 0;
        if ((state & NUD_VALID) ||
            (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
                *last_resort = fi;
                *last_idx = order;
        }
        return 1;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
{
        int nhs = 0;

        while (rtnh_ok(rtnh, remaining)) {
                nhs++;
                rtnh = rtnh_next(rtnh, &remaining);
        }

        /* leftover implies invalid nexthop configuration, discard it */
        return remaining > 0 ? 0 : nhs;
}

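/* Fill in the fib_nh array of @fi from an RTA_MULTIPATH payload: one
 * struct rtnexthop per nexthop, each optionally followed by
 * RTA_GATEWAY, RTA_FLOW and RTA_ENCAP/RTA_ENCAP_TYPE attributes.
 */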
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg)
{
        struct net *net = cfg->fc_nlinfo.nl_net;
        int ret;

        change_nexthops(fi) {
                int attrlen;

                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;

                if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
                        return -EINVAL;

                nexthop_nh->nh_flags =
                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
                nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;

                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
                        if (nexthop_nh->nh_tclassid)
                                fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
                        nla = nla_find(attrs, attrlen, RTA_ENCAP);
                        if (nla) {
                                struct lwtunnel_state *lwtstate;
                                struct net_device *dev = NULL;
                                struct nlattr *nla_entype;

                                nla_entype = nla_find(attrs, attrlen,
                                                      RTA_ENCAP_TYPE);
                                if (!nla_entype)
                                        goto err_inval;
                                if (cfg->fc_oif)
                                        dev = __dev_get_by_index(net, cfg->fc_oif);
                                ret = lwtunnel_build_state(dev, nla_get_u16(
                                                           nla_entype),
                                                           nla, AF_INET, cfg,
                                                           &lwtstate);
                                if (ret)
                                        goto errout;
                                nexthop_nh->nh_lwtstate =
                                        lwtstate_get(lwtstate);
                        }
                }

                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);

        return 0;

err_inval:
        ret = -EINVAL;

errout:
        return ret;
}

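/* Recompute the hash-threshold upper bound of each nexthop: every
 * live nexthop gets the top of its weight's share of the [0, 2^31-1]
 * hash space (cumulative weight << 31 / total, rounded, minus one);
 * dead or ignored link-down nexthops get -1 so they never match.
 */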
static void fib_rebalance(struct fib_info *fi)
{
        int total;
        int w;
        struct in_device *in_dev;

        if (fi->fib_nhs < 2)
                return;

        total = 0;
        for_nexthops(fi) {
                if (nh->nh_flags & RTNH_F_DEAD)
                        continue;

                in_dev = __in_dev_get_rtnl(nh->nh_dev);

                if (in_dev &&
                    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
                    nh->nh_flags & RTNH_F_LINKDOWN)
                        continue;

                total += nh->nh_weight;
        } endfor_nexthops(fi);

        w = 0;
        change_nexthops(fi) {
                int upper_bound;

                in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);

                if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
                        upper_bound = -1;
                } else if (in_dev &&
                           IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
                           nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
                        upper_bound = -1;
                } else {
                        w += nexthop_nh->nh_weight;
                        upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
                                                            total) - 1;
                }

                atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
        } endfor_nexthops(fi);

        net_get_random_once(&fib_multipath_secret,
                            sizeof(fib_multipath_secret));
}

static inline void fib_add_weight(struct fib_info *fi,
                                  const struct fib_nh *nh)
{
        fi->fib_weight += nh->nh_weight;
}

#else /* CONFIG_IP_ROUTE_MULTIPATH */

#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

static int fib_encap_match(struct net *net, u16 encap_type,
                           struct nlattr *encap,
                           int oif, const struct fib_nh *nh,
                           const struct fib_config *cfg)
{
        struct lwtunnel_state *lwtstate;
        struct net_device *dev = NULL;
        int ret, result = 0;

        if (encap_type == LWTUNNEL_ENCAP_NONE)
                return 0;

        if (oif)
                dev = __dev_get_by_index(net, oif);
        ret = lwtunnel_build_state(dev, encap_type, encap,
                                   AF_INET, cfg, &lwtstate);
        if (!ret) {
                result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
                lwtstate_free(lwtstate);
        }

        return result;
}

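/* Compare the nexthop specification in @cfg against @fi.  Returns 0
 * on a match and 1 on a mismatch; a malformed multipath attribute
 * yields -EINVAL.
 */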
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
        struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        struct rtnexthop *rtnh;
        int remaining;
#endif

        if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
                return 1;

        if (cfg->fc_oif || cfg->fc_gw) {
                if (cfg->fc_encap) {
                        if (fib_encap_match(net, cfg->fc_encap_type,
                                            cfg->fc_encap, cfg->fc_oif,
                                            fi->fib_nh, cfg))
                                return 1;
                }
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
                    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
                        return 0;
                return 1;
        }

#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (!cfg->fc_mp)
                return 0;

        rtnh = cfg->fc_mp;
        remaining = cfg->fc_mp_len;

        for_nexthops(fi) {
                int attrlen;

                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;

                if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
                        return 1;

                attrlen = rtnh_attrlen(rtnh);
                if (attrlen > 0) {
                        struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

                        nla = nla_find(attrs, attrlen, RTA_GATEWAY);
                        if (nla && nla_get_in_addr(nla) != nh->nh_gw)
                                return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        nla = nla_find(attrs, attrlen, RTA_FLOW);
                        if (nla && nla_get_u32(nla) != nh->nh_tclassid)
                                return 1;
#endif
                }

                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);
#endif
        return 0;
}


/*
 * Picture
 * -------
 *
 * The semantics of nexthops are very messy for historical reasons.
 * We have to take into account that:
 * a) a gateway can actually be a local interface address,
 *    so that a gatewayed route is direct.
 * b) a gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route.
 * c) if both gateway and interface are specified, they must not
 *    contradict each other.
 * d) with tunnel routes the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory)
 * conditions results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the amount of code
 * practically does not increase, but the result becomes much more
 * general.
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have a narrower scope. This recursion stops
 * when gw has LOCAL scope or if the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on-link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g., as a by-product it allows
 * independent exterior and interior routing processes to
 * coexist in peace.
 *
 * Normally it looks like the following.
 *
 * {universe prefix}  -> (gw, oif) [scope link]
 *                        |
 *                        |-> {link prefix} -> (gw, oif) [scope local]
 *                                              |
 *                                              |-> {local prefix} (terminal node)
 */
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        struct fib_nh *nh)
{
        int err = 0;
        struct net *net;
        struct net_device *dev;

        net = cfg->fc_nlinfo.nl_net;
        if (nh->nh_gw) {
                struct fib_result res;

                if (nh->nh_flags & RTNH_F_ONLINK) {
                        unsigned int addr_type;

                        if (cfg->fc_scope >= RT_SCOPE_LINK)
                                return -EINVAL;
                        dev = __dev_get_by_index(net, nh->nh_oif);
                        if (!dev)
                                return -ENODEV;
                        if (!(dev->flags & IFF_UP))
                                return -ENETDOWN;
                        addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
                        if (addr_type != RTN_UNICAST)
                                return -EINVAL;
                        if (!netif_carrier_ok(dev))
                                nh->nh_flags |= RTNH_F_LINKDOWN;
                        nh->nh_dev = dev;
                        dev_hold(dev);
                        nh->nh_scope = RT_SCOPE_LINK;
                        return 0;
                }
                rcu_read_lock();
                {
                        struct fib_table *tbl = NULL;
                        struct flowi4 fl4 = {
                                .daddr = nh->nh_gw,
                                .flowi4_scope = cfg->fc_scope + 1,
                                .flowi4_oif = nh->nh_oif,
                                .flowi4_iif = LOOPBACK_IFINDEX,
                        };

                        /* It is not necessary, but requires a bit of thinking */
                        if (fl4.flowi4_scope < RT_SCOPE_LINK)
                                fl4.flowi4_scope = RT_SCOPE_LINK;

                        if (cfg->fc_table)
                                tbl = fib_get_table(net, cfg->fc_table);

                        if (tbl)
                                err = fib_table_lookup(tbl, &fl4, &res,
                                                       FIB_LOOKUP_IGNORE_LINKSTATE |
                                                       FIB_LOOKUP_NOREF);

                        /* on error or if no table given do full lookup. This
                         * is needed for example when nexthops are in the local
                         * table rather than the given table
                         */
                        if (!tbl || err) {
                                err = fib_lookup(net, &fl4, &res,
                                                 FIB_LOOKUP_IGNORE_LINKSTATE);
                        }

                        if (err) {
                                rcu_read_unlock();
                                return err;
                        }
                }
                err = -EINVAL;
                if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
                        goto out;
                nh->nh_scope = res.scope;
                nh->nh_oif = FIB_RES_OIF(res);
                nh->nh_dev = dev = FIB_RES_DEV(res);
                if (!dev)
                        goto out;
                dev_hold(dev);
                if (!netif_carrier_ok(dev))
                        nh->nh_flags |= RTNH_F_LINKDOWN;
                err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
        } else {
                struct in_device *in_dev;

                if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
                        return -EINVAL;

                rcu_read_lock();
                err = -ENODEV;
                in_dev = inetdev_by_index(net, nh->nh_oif);
                if (!in_dev)
                        goto out;
                err = -ENETDOWN;
                if (!(in_dev->dev->flags & IFF_UP))
                        goto out;
                nh->nh_dev = in_dev->dev;
                dev_hold(nh->nh_dev);
                nh->nh_scope = RT_SCOPE_HOST;
                if (!netif_carrier_ok(nh->nh_dev))
                        nh->nh_flags |= RTNH_F_LINKDOWN;
                err = 0;
        }
out:
        rcu_read_unlock();
        return err;
}

static inline unsigned int fib_laddr_hashfn(__be32 val)
{
        unsigned int mask = (fib_info_hash_size - 1);

        return ((__force u32)val ^
                ((__force u32)val >> 7) ^
                ((__force u32)val >> 14)) & mask;
}

static struct hlist_head *fib_info_hash_alloc(int bytes)
{
        if (bytes <= PAGE_SIZE)
                return kzalloc(bytes, GFP_KERNEL);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         get_order(bytes));
}

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
        if (!hash)
                return;

        if (bytes <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long) hash, get_order(bytes));
}

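/* Rehash every fib_info into the new (larger) tables.  fib_info_lock
 * serializes the move against the other hash insertions and deletions.
 */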
static void fib_info_hash_move(struct hlist_head *new_info_hash,
                               struct hlist_head *new_laddrhash,
                               unsigned int new_size)
{
        struct hlist_head *old_info_hash, *old_laddrhash;
        unsigned int old_size = fib_info_hash_size;
        unsigned int i, bytes;

        spin_lock_bh(&fib_info_lock);
        old_info_hash = fib_info_hash;
        old_laddrhash = fib_info_laddrhash;
        fib_info_hash_size = new_size;

        for (i = 0; i < old_size; i++) {
                struct hlist_head *head = &fib_info_hash[i];
                struct hlist_node *n;
                struct fib_info *fi;

                hlist_for_each_entry_safe(fi, n, head, fib_hash) {
                        struct hlist_head *dest;
                        unsigned int new_hash;

                        new_hash = fib_info_hashfn(fi);
                        dest = &new_info_hash[new_hash];
                        hlist_add_head(&fi->fib_hash, dest);
                }
        }
        fib_info_hash = new_info_hash;

        for (i = 0; i < old_size; i++) {
                struct hlist_head *lhead = &fib_info_laddrhash[i];
                struct hlist_node *n;
                struct fib_info *fi;

                hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
                        struct hlist_head *ldest;
                        unsigned int new_hash;

                        new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
                        ldest = &new_laddrhash[new_hash];
                        hlist_add_head(&fi->fib_lhash, ldest);
                }
        }
        fib_info_laddrhash = new_laddrhash;

        spin_unlock_bh(&fib_info_lock);

        bytes = old_size * sizeof(struct hlist_head *);
        fib_info_hash_free(old_info_hash, bytes);
        fib_info_hash_free(old_laddrhash, bytes);
}

__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
{
        nh->nh_saddr = inet_select_addr(nh->nh_dev,
                                        nh->nh_gw,
                                        nh->nh_parent->fib_scope);
        nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

        return nh->nh_saddr;
}

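/* A preferred source address must be one of our local addresses,
 * looked up first in the table the route goes into (main is aliased
 * to local) and then in the local table; the check is skipped for a
 * local route whose destination is the prefsrc itself.
 */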
static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
{
        if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
            fib_prefsrc != cfg->fc_dst) {
                u32 tb_id = cfg->fc_table;
                int rc;

                if (tb_id == RT_TABLE_MAIN)
                        tb_id = RT_TABLE_LOCAL;

                rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
                                          fib_prefsrc, tb_id);

                if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
                        rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
                                                  fib_prefsrc, RT_TABLE_LOCAL);
                }

                if (rc != RTN_LOCAL)
                        return false;
        }
        return true;
}

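/* Copy the RTAX_* metrics from the netlink attribute in @cfg into
 * fi->fib_metrics, translating RTAX_CC_ALGO from a congestion
 * control name into its key and clamping out-of-range values.
 */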
static int
fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
{
        bool ecn_ca = false;
        struct nlattr *nla;
        int remaining;

        if (!cfg->fc_mx)
                return 0;

        nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
                int type = nla_type(nla);
                u32 val;

                if (!type)
                        continue;
                if (type > RTAX_MAX)
                        return -EINVAL;

                if (type == RTAX_CC_ALGO) {
                        char tmp[TCP_CA_NAME_MAX];

                        nla_strlcpy(tmp, nla, sizeof(tmp));
                        val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
                        if (val == TCP_CA_UNSPEC)
                                return -EINVAL;
                } else {
                        val = nla_get_u32(nla);
                }
                if (type == RTAX_ADVMSS && val > 65535 - 40)
                        val = 65535 - 40;
                if (type == RTAX_MTU && val > 65535 - 15)
                        val = 65535 - 15;
                if (type == RTAX_HOPLIMIT && val > 255)
                        val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        return -EINVAL;
                fi->fib_metrics->metrics[type - 1] = val;
        }

        if (ecn_ca)
                fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;

        return 0;
}

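/* Build a fib_info and its nexthops from a route configuration.  If
 * an equivalent fib_info already exists, the new one is freed and
 * the existing one is returned with its tree refcount bumped.
 */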
struct fib_info *fib_create_info(struct fib_config *cfg)
{
        int err;
        struct fib_info *fi = NULL;
        struct fib_info *ofi;
        int nhs = 1;
        struct net *net = cfg->fc_nlinfo.nl_net;

        if (cfg->fc_type > RTN_MAX)
                goto err_inval;

        /* Fast check to catch the most weird cases */
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;

        if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
                goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp) {
                nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
                if (nhs == 0)
                        goto err_inval;
        }
#endif

        err = -ENOBUFS;
        if (fib_info_cnt >= fib_info_hash_size) {
                unsigned int new_size = fib_info_hash_size << 1;
                struct hlist_head *new_info_hash;
                struct hlist_head *new_laddrhash;
                unsigned int bytes;

                if (!new_size)
                        new_size = 16;
                bytes = new_size * sizeof(struct hlist_head *);
                new_info_hash = fib_info_hash_alloc(bytes);
                new_laddrhash = fib_info_hash_alloc(bytes);
                if (!new_info_hash || !new_laddrhash) {
                        fib_info_hash_free(new_info_hash, bytes);
                        fib_info_hash_free(new_laddrhash, bytes);
                } else
                        fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

                if (!fib_info_hash_size)
                        goto failure;
        }

        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (!fi)
                goto failure;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
                if (unlikely(!fi->fib_metrics)) {
                        kfree(fi);
                        return ERR_PTR(err);
                }
                atomic_set(&fi->fib_metrics->refcnt, 1);
        } else {
                fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
        }
        fib_info_cnt++;
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
        fi->fib_scope = cfg->fc_scope;
        fi->fib_flags = cfg->fc_flags;
        fi->fib_priority = cfg->fc_priority;
        fi->fib_prefsrc = cfg->fc_prefsrc;
        fi->fib_type = cfg->fc_type;

        fi->fib_nhs = nhs;
        change_nexthops(fi) {
                nexthop_nh->nh_parent = fi;
                nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
                if (!nexthop_nh->nh_pcpu_rth_output)
                        goto failure;
        } endfor_nexthops(fi)

        err = fib_convert_metrics(fi, cfg);
        if (err)
                goto failure;

        if (cfg->fc_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
                if (err != 0)
                        goto failure;
                if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
                        goto err_inval;
                if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
                        goto err_inval;
#ifdef CONFIG_IP_ROUTE_CLASSID
                if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
                        goto err_inval;
#endif
#else
                goto err_inval;
#endif
        } else {
                struct fib_nh *nh = fi->fib_nh;

                if (cfg->fc_encap) {
                        struct lwtunnel_state *lwtstate;
                        struct net_device *dev = NULL;

                        if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
                                goto err_inval;
                        if (cfg->fc_oif)
                                dev = __dev_get_by_index(net, cfg->fc_oif);
                        err = lwtunnel_build_state(dev, cfg->fc_encap_type,
                                                   cfg->fc_encap, AF_INET, cfg,
                                                   &lwtstate);
                        if (err)
                                goto failure;

                        nh->nh_lwtstate = lwtstate_get(lwtstate);
                }
                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
                nh->nh_tclassid = cfg->fc_flow;
                if (nh->nh_tclassid)
                        fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                nh->nh_weight = 1;
#endif
        }

        if (fib_props[cfg->fc_type].error) {
                if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
                        goto err_inval;
                goto link_it;
        } else {
                switch (cfg->fc_type) {
                case RTN_UNICAST:
                case RTN_LOCAL:
                case RTN_BROADCAST:
                case RTN_ANYCAST:
                case RTN_MULTICAST:
                        break;
                default:
                        goto err_inval;
                }
        }

        if (cfg->fc_scope > RT_SCOPE_HOST)
                goto err_inval;

        if (cfg->fc_scope == RT_SCOPE_HOST) {
                struct fib_nh *nh = fi->fib_nh;

                /* Local address is added. */
                if (nhs != 1 || nh->nh_gw)
                        goto err_inval;
                nh->nh_scope = RT_SCOPE_NOWHERE;
                nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
                err = -ENODEV;
                if (!nh->nh_dev)
                        goto failure;
        } else {
                int linkdown = 0;

                change_nexthops(fi) {
                        err = fib_check_nh(cfg, fi, nexthop_nh);
                        if (err != 0)
                                goto failure;
                        if (nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
                                linkdown++;
                } endfor_nexthops(fi)
                if (linkdown == fi->fib_nhs)
                        fi->fib_flags |= RTNH_F_LINKDOWN;
        }

        if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
                goto err_inval;

        change_nexthops(fi) {
                fib_info_update_nh_saddr(net, nexthop_nh);
                fib_add_weight(fi, nexthop_nh);
        } endfor_nexthops(fi)

        fib_rebalance(fi);

link_it:
        ofi = fib_find_info(fi);
        if (ofi) {
                fi->fib_dead = 1;
                free_fib_info(fi);
                ofi->fib_treeref++;
                return ofi;
        }

        fi->fib_treeref++;
        atomic_inc(&fi->fib_clntref);
        spin_lock_bh(&fib_info_lock);
        hlist_add_head(&fi->fib_hash,
                       &fib_info_hash[fib_info_hashfn(fi)]);
        if (fi->fib_prefsrc) {
                struct hlist_head *head;

                head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
                hlist_add_head(&fi->fib_lhash, head);
        }
        change_nexthops(fi) {
                struct hlist_head *head;
                unsigned int hash;

                if (!nexthop_nh->nh_dev)
                        continue;
                hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
                head = &fib_info_devhash[hash];
                hlist_add_head(&nexthop_nh->nh_hash, head);
        } endfor_nexthops(fi)
        spin_unlock_bh(&fib_info_lock);
        return fi;

err_inval:
        err = -EINVAL;

failure:
        if (fi) {
                fi->fib_dead = 1;
                free_fib_info(fi);
        }

        return ERR_PTR(err);
}

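/* Fill a netlink route message for one fib entry.  A single nexthop
 * is emitted as plain RTA_* attributes, a multipath route as a
 * nested RTA_MULTIPATH attribute.  Returns -EMSGSIZE if @skb has no
 * room left.
 */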
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
                  struct fib_info *fi, unsigned int flags)
{
        struct nlmsghdr *nlh;
        struct rtmsg *rtm;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;

        rtm = nlmsg_data(nlh);
        rtm->rtm_family = AF_INET;
        rtm->rtm_dst_len = dst_len;
        rtm->rtm_src_len = 0;
        rtm->rtm_tos = tos;
        if (tb_id < 256)
                rtm->rtm_table = tb_id;
        else
                rtm->rtm_table = RT_TABLE_COMPAT;
        if (nla_put_u32(skb, RTA_TABLE, tb_id))
                goto nla_put_failure;
        rtm->rtm_type = type;
        rtm->rtm_flags = fi->fib_flags;
        rtm->rtm_scope = fi->fib_scope;
        rtm->rtm_protocol = fi->fib_protocol;

        if (rtm->rtm_dst_len &&
            nla_put_in_addr(skb, RTA_DST, dst))
                goto nla_put_failure;
        if (fi->fib_priority &&
            nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
                goto nla_put_failure;
        if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
                goto nla_put_failure;

        if (fi->fib_prefsrc &&
            nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
                goto nla_put_failure;
        if (fi->fib_nhs == 1) {
                struct in_device *in_dev;

                if (fi->fib_nh->nh_gw &&
                    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
                        goto nla_put_failure;
                if (fi->fib_nh->nh_oif &&
                    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
                        goto nla_put_failure;
                if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
                        in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
                        if (in_dev &&
                            IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
                                rtm->rtm_flags |= RTNH_F_DEAD;
                }
#ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid &&
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
                        goto nla_put_failure;
#endif
                if (fi->fib_nh->nh_lwtstate &&
                    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
                        goto nla_put_failure;
        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
                struct rtnexthop *rtnh;
                struct nlattr *mp;

                mp = nla_nest_start(skb, RTA_MULTIPATH);
                if (!mp)
                        goto nla_put_failure;

                for_nexthops(fi) {
                        struct in_device *in_dev;

                        rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
                        if (!rtnh)
                                goto nla_put_failure;

                        rtnh->rtnh_flags = nh->nh_flags & 0xFF;
                        if (nh->nh_flags & RTNH_F_LINKDOWN) {
                                in_dev = __in_dev_get_rtnl(nh->nh_dev);
                                if (in_dev &&
                                    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
                                        rtnh->rtnh_flags |= RTNH_F_DEAD;
                        }
                        rtnh->rtnh_hops = nh->nh_weight - 1;
                        rtnh->rtnh_ifindex = nh->nh_oif;

                        if (nh->nh_gw &&
                            nla_put_in_addr(skb, RTA_GATEWAY, nh->nh_gw))
                                goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
                        if (nh->nh_tclassid &&
                            nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                                goto nla_put_failure;
#endif
                        if (nh->nh_lwtstate &&
                            lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
                                goto nla_put_failure;

                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);

                nla_nest_end(skb, mp);
        }
#endif
        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

/*
 * Update the FIB if:
 * - a local address disappeared -> we must delete all the entries
 *   referring to it.
 * - a device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net *net, __be32 local)
{
        int ret = 0;
        unsigned int hash = fib_laddr_hashfn(local);
        struct hlist_head *head = &fib_info_laddrhash[hash];
        struct fib_info *fi;

        if (!fib_info_laddrhash || local == 0)
                return 0;

        hlist_for_each_entry(fi, head, fib_lhash) {
                if (!net_eq(fi->fib_net, net))
                        continue;
                if (fi->fib_prefsrc == local) {
                        fi->fib_flags |= RTNH_F_DEAD;
                        ret++;
                }
        }
        return ret;
}

/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
{
        int ret = 0;
        int scope = RT_SCOPE_NOWHERE;
        struct fib_info *prev_fi = NULL;
        unsigned int hash = fib_devindex_hashfn(dev->ifindex);
        struct hlist_head *head = &fib_info_devhash[hash];
        struct fib_nh *nh;

        if (force)
                scope = -1;

        hlist_for_each_entry(nh, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int dead;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;
                prev_fi = fi;
                dead = 0;
                change_nexthops(fi) {
                        if (nexthop_nh->nh_flags & RTNH_F_DEAD)
                                dead++;
                        else if (nexthop_nh->nh_dev == dev &&
                                 nexthop_nh->nh_scope != scope) {
                                switch (event) {
                                case NETDEV_DOWN:
                                case NETDEV_UNREGISTER:
                                        nexthop_nh->nh_flags |= RTNH_F_DEAD;
                                        /* fall through */
                                case NETDEV_CHANGE:
                                        nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
                                        break;
                                }
                                dead++;
                        }
#ifdef CONFIG_IP_ROUTE_MULTIPATH
                        if (event == NETDEV_UNREGISTER &&
                            nexthop_nh->nh_dev == dev) {
                                dead = fi->fib_nhs;
                                break;
                        }
#endif
                } endfor_nexthops(fi)
                if (dead == fi->fib_nhs) {
                        switch (event) {
                        case NETDEV_DOWN:
                        case NETDEV_UNREGISTER:
                                fi->fib_flags |= RTNH_F_DEAD;
                                /* fall through */
                        case NETDEV_CHANGE:
                                fi->fib_flags |= RTNH_F_LINKDOWN;
                                break;
                        }
                        ret++;
                }

                fib_rebalance(fi);
        }

        return ret;
}

/* Must be invoked inside of an RCU protected region. */
void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
{
        struct fib_info *fi = NULL, *last_resort = NULL;
        struct hlist_head *fa_head = res->fa_head;
        struct fib_table *tb = res->table;
        u8 slen = 32 - res->prefixlen;
        int order = -1, last_idx = -1;
        struct fib_alias *fa, *fa1 = NULL;
        u32 last_prio = res->fi->fib_priority;
        u8 last_tos = 0;

        hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
                struct fib_info *next_fi = fa->fa_info;

                if (fa->fa_slen != slen)
                        continue;
                if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
                        continue;
                if (fa->tb_id != tb->tb_id)
                        continue;
                if (next_fi->fib_priority > last_prio &&
                    fa->fa_tos == last_tos) {
                        if (last_tos)
                                continue;
                        break;
                }
                if (next_fi->fib_flags & RTNH_F_DEAD)
                        continue;
                last_tos = fa->fa_tos;
                last_prio = next_fi->fib_priority;

                if (next_fi->fib_scope != res->scope ||
                    fa->fa_type != RTN_UNICAST)
                        continue;
                if (!next_fi->fib_nh[0].nh_gw ||
                    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
                        continue;

                fib_alias_accessed(fa);

                if (!fi) {
                        if (next_fi != res->fi)
                                break;
                        fa1 = fa;
                } else if (!fib_detect_death(fi, order, &last_resort,
                                             &last_idx, fa1->fa_default)) {
                        fib_result_assign(res, fi);
                        fa1->fa_default = order;
                        goto out;
                }
                fi = next_fi;
                order++;
        }

        if (order <= 0 || !fi) {
                if (fa1)
                        fa1->fa_default = -1;
                goto out;
        }

        if (!fib_detect_death(fi, order, &last_resort, &last_idx,
                              fa1->fa_default)) {
                fib_result_assign(res, fi);
                fa1->fa_default = order;
                goto out;
        }

        if (last_idx >= 0)
                fib_result_assign(res, last_resort);
        fa1->fa_default = last_idx;
out:
        return;
}

/*
 * A dead device goes up. We wake up dead nexthops.
 * This makes sense only for multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
{
        struct fib_info *prev_fi;
        unsigned int hash;
        struct hlist_head *head;
        struct fib_nh *nh;
        int ret;

        if (!(dev->flags & IFF_UP))
                return 0;

        if (nh_flags & RTNH_F_DEAD) {
                unsigned int flags = dev_get_flags(dev);

                if (flags & (IFF_RUNNING | IFF_LOWER_UP))
                        nh_flags |= RTNH_F_LINKDOWN;
        }

        prev_fi = NULL;
        hash = fib_devindex_hashfn(dev->ifindex);
        head = &fib_info_devhash[hash];
        ret = 0;

        hlist_for_each_entry(nh, head, nh_hash) {
                struct fib_info *fi = nh->nh_parent;
                int alive;

                BUG_ON(!fi->fib_nhs);
                if (nh->nh_dev != dev || fi == prev_fi)
                        continue;

                prev_fi = fi;
                alive = 0;
                change_nexthops(fi) {
                        if (!(nexthop_nh->nh_flags & nh_flags)) {
                                alive++;
                                continue;
                        }
                        if (!nexthop_nh->nh_dev ||
                            !(nexthop_nh->nh_dev->flags & IFF_UP))
                                continue;
                        if (nexthop_nh->nh_dev != dev ||
                            !__in_dev_get_rtnl(dev))
                                continue;
                        alive++;
                        nexthop_nh->nh_flags &= ~nh_flags;
                } endfor_nexthops(fi)

                if (alive > 0) {
                        fi->fib_flags &= ~nh_flags;
                        ret++;
                }

                fib_rebalance(fi);
        }

        return ret;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH

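/* Hash-threshold selection: pick the first nexthop whose precomputed
 * upper bound (see fib_rebalance()) is not below @hash.
 */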
void fib_select_multipath(struct fib_result *res, int hash)
{
        struct fib_info *fi = res->fi;

        for_nexthops(fi) {
                if (hash > atomic_read(&nh->nh_upper_bound))
                        continue;

                res->nh_sel = nhsel;
                return;
        } endfor_nexthops(fi);

        /* Race condition: route has just become dead. */
        res->nh_sel = 0;
}
#endif

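/* Final path selection for an output route: spread the flow across
 * multipath nexthops, arbitrate between default routes, and choose a
 * source address if the flow does not carry one yet.
 */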
void fib_select_path(struct net *net, struct fib_result *res,
                     struct flowi4 *fl4, int mp_hash)
{
        bool oif_check;

        oif_check = (fl4->flowi4_oif == 0 ||
                     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (res->fi->fib_nhs > 1 && oif_check) {
                if (mp_hash < 0)
                        mp_hash = get_hash_from_flowi4(fl4) >> 1;

                fib_select_multipath(res, mp_hash);
        }
        else
#endif
        if (!res->prefixlen &&
            res->table->tb_num_default > 1 &&
            res->type == RTN_UNICAST && oif_check)
                fib_select_default(fl4, res);

        if (!fl4->saddr)
                fl4->saddr = FIB_RES_PREFSRC(net, *res);
}
EXPORT_SYMBOL_GPL(fib_select_path);