blob: 3f0d5ac2acbd9f417750730366da1f2adf8af1de [file] [log] [blame]
Neale Ranns0bfe5d82016-08-25 15:29:12 +01001/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
/**
 * \brief
 * The load-balance object represents an ECMP choice. The buckets of a load
 * balance object point to the sub-graph after the choice is made.
 * The load-balance object is also the object type returned from a FIB table
 * lookup. As such it needs to represent the case where there is only one
 * choice. It may seem like overkill to use a load-balance object in this
 * case, but the reason is performance. If the load-balance object were not
 * the result of the FIB lookup, then some other object would be. In the case
 * where there is ECMP, this other object would need a load-balance as a
 * parent, and hence would just add an unnecessary indirection.
 *
 * It is also the object in the DP that represents a via-fib-entry in a
 * recursive route.
 *
 */
31
32#ifndef __LOAD_BALANCE_H__
33#define __LOAD_BALANCE_H__
34
35#include <vlib/vlib.h>
36#include <vnet/ip/lookup.h>
37#include <vnet/dpo/dpo.h>
38#include <vnet/fib/fib_types.h>
Neale Ranns32e1c012016-11-22 17:07:28 +000039#include <vnet/fib/fib_entry.h>
Neale Ranns0bfe5d82016-08-25 15:29:12 +010040
/**
 * Load-balance main. Holds the combined (packet and byte) counter pools
 * shared by all load-balance objects.
 */
typedef struct load_balance_main_t_
{
    /** combined counters indexed per-LB for traffic forwarded 'to' it */
    vlib_combined_counter_main_t lbm_to_counters;
    /** combined counters indexed per-LB for traffic forwarded 'via' it
     *  (NOTE(review): to/via distinction inferred from names — the
     *  counters are updated in load_balance.c / the forwarding nodes) */
    vlib_combined_counter_main_t lbm_via_counters;
} load_balance_main_t;

/** Global instance of the load-balance main; defined in load_balance.c */
extern load_balance_main_t load_balance_main;
51
/**
 * The number of buckets that a load-balance object can have and still
 * fit in one cache-line. Buckets beyond this count are stored in the
 * separately allocated lb_buckets vector (see load_balance_t).
 */
#define LB_NUM_INLINE_BUCKETS 4
57
/**
 * @brief One path from an [EU]CMP set that the client wants to add to a
 * load-balance object
 */
typedef struct load_balance_path_t_ {
    /**
     * ID of the Data-path object for this path.
     */
    dpo_id_t path_dpo;

    /**
     * The index of the FIB path this choice was derived from.
     */
    fib_node_index_t path_index;

    /**
     * Weight for the path; used to apportion buckets among the paths
     * of an unequal-cost (UCMP) set.
     */
    u32 path_weight;
} load_balance_path_t;
78
/**
 * Flags controlling load-balance creation and modification
 * (attribute/enumerated form; load_balance_flags_t is the bit-mask form)
 */
typedef enum load_balance_attr_t_ {
    /** the load-balance uses a load-balance map for bucket selection */
    LOAD_BALANCE_ATTR_USES_MAP = 0,
    /** 'sticky' bucket distribution (see load_balance.c for semantics) */
    LOAD_BALANCE_ATTR_STICKY = 1,
} load_balance_attr_t;

/** Human-readable names, indexed by load_balance_attr_t (for formatting) */
#define LOAD_BALANCE_ATTR_NAMES {                   \
    [LOAD_BALANCE_ATTR_USES_MAP] = "uses-map",      \
    [LOAD_BALANCE_ATTR_STICKY] = "sticky",          \
}

/** Iterate _attr over every load_balance_attr_t value */
#define FOR_EACH_LOAD_BALANCE_ATTR(_attr)                \
    for (_attr = 0; _attr <= LOAD_BALANCE_ATTR_STICKY; _attr++)
94
/**
 * Bit-mask form of load_balance_attr_t; packed so the enum occupies the
 * smallest possible size (it is stored in-object in load_balance_t).
 */
typedef enum load_balance_flags_t_ {
    LOAD_BALANCE_FLAG_NONE = 0,
    /** 1 << LOAD_BALANCE_ATTR_USES_MAP */
    LOAD_BALANCE_FLAG_USES_MAP = (1 << 0),
    /** 1 << LOAD_BALANCE_ATTR_STICKY */
    LOAD_BALANCE_FLAG_STICKY = (1 << 1),
} __attribute__((packed)) load_balance_flags_t;
100
/**
 * The FIB DPO provides;
 * - load-balancing over the next DPOs in the chain/graph
 * - per-route counters
 */
typedef struct load_balance_t_ {
    /**
     * required for pool_get_aligned.
     * members used in the switch path come first!
     */
    CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);

    /**
     * number of buckets in the load-balance. always a power of 2.
     */
    u16 lb_n_buckets;
    /**
     * number of buckets in the load-balance - 1. used in the switch path
     * as part of the hash calculation (hash & (n_buckets - 1), valid
     * because n_buckets is a power of 2).
     */
    u16 lb_n_buckets_minus_1;

    /**
     * The protocol of packets that traverse this LB.
     * needed in combination with the flow hash config to determine how to
     * hash. u8.
     */
    dpo_proto_t lb_proto;

    /**
     * Flags concerning the LB's creation and modification
     */
    load_balance_flags_t lb_flags;

    /**
     * Flags from the load-balance's associated fib_entry_t
     */
    fib_entry_flag_t lb_fib_entry_flags;

    /**
     * The number of locks, which is approximately the number of users,
     * of this load-balance.
     * Load-balance objects of via-entries are heavily shared by recursives,
     * so the lock count is a u32.
     */
    u32 lb_locks;

    /**
     * index of the load-balance map, INVALID if this LB does not use one
     */
    index_t lb_map;

    /**
     * This is the index of the uRPF list for this LB
     */
    index_t lb_urpf;

    /**
     * the hash config to use when selecting a bucket. this is a u16
     */
    flow_hash_config_t lb_hash_config;

    /**
     * Vector of buckets containing the next DPOs, sized as lbo_num.
     * Only used when the buckets do not fit inline (see below).
     */
    dpo_id_t *lb_buckets;

    /**
     * The rest of the cache line is used for buckets. In the common case
     * where there are fewer than LB_NUM_INLINE_BUCKETS buckets, the buckets
     * are on the same cache-line and we save ourselves a pointer dereference
     * in the data-path.
     */
    dpo_id_t lb_buckets_inline[LB_NUM_INLINE_BUCKETS];
} load_balance_t;
176
Damjan Marioncf478942016-11-07 14:57:50 +0100177STATIC_ASSERT(sizeof(load_balance_t) <= CLIB_CACHE_LINE_BYTES,
178 "A load_balance object size exceeds one cachline");
Neale Ranns0bfe5d82016-08-25 15:29:12 +0100179
/**
 * Flags controlling load-balance formatting/display
 */
typedef enum load_balance_format_flags_t_ {
    /** default/brief output */
    LOAD_BALANCE_FORMAT_NONE,
    /** include per-bucket detail in the output */
    LOAD_BALANCE_FORMAT_DETAIL = (1 << 0),
} load_balance_format_flags_t;
187
/**
 * @brief Create a new load-balance object.
 *
 * @param num_buckets number of buckets to allocate
 * @param lb_proto    protocol of the packets the LB will forward
 * @param fhc         flow-hash config used when selecting a bucket
 * @return pool index of the new load-balance
 */
extern index_t load_balance_create(u32 num_buckets,
                                   dpo_proto_t lb_proto,
                                   flow_hash_config_t fhc);
/**
 * @brief Update a load-balance with a new (raw, i.e. un-normalised) set of
 * paths. Implementation in load_balance.c.
 *
 * @param dpo           DPO identifying the load-balance to update
 * @param raw_next_hops vector of candidate next-hop paths
 * @param flags         creation/modification flags
 */
extern void load_balance_multipath_update(
    const dpo_id_t *dpo,
    const load_balance_path_t * raw_next_hops,
    load_balance_flags_t flags);

/** @brief Set the DPO used by one bucket of the load-balance */
extern void load_balance_set_bucket(index_t lbi,
                                    u32 bucket,
                                    const dpo_id_t *next);
/** @brief Associate a uRPF list (by index) with the load-balance */
extern void load_balance_set_urpf(index_t lbi,
                                  index_t urpf);
/** @brief Stash the flags of the LB's associated fib_entry_t on the LB */
extern void load_balance_set_fib_entry_flags(index_t lbi,
                                             fib_entry_flag_t flags);
/** @brief Get the index of the uRPF list used by this load-balance */
extern index_t load_balance_get_urpf(index_t lbi);

/** @brief format function for load-balance objects (vlib format style) */
extern u8* format_load_balance(u8 * s, va_list * args);

/** @brief Get the DPO in the given bucket of the load-balance at index lbi */
extern const dpo_id_t *load_balance_get_bucket(index_t lbi,
                                               u32 bucket);
/** @brief Predicate: does this load-balance DPO resolve to a drop?
 *  (exact semantics in load_balance.c) */
extern int load_balance_is_drop(const dpo_id_t *dpo);
/** @brief Number of buckets in the load-balance at index lbi */
extern u16 load_balance_n_buckets(index_t lbi);

/** @brief Tolerance used when normalising a multipath distribution into
 *  buckets — see load_balance.c for how it is applied */
extern f64 load_balance_get_multipath_tolerance(void);
213
/**
 * The encapsulation breakages are for fast DP access:
 * the pool is exposed so the inline getters below avoid a function call
 * in the data-path.
 */
extern load_balance_t *load_balance_pool;

/**
 * Get a load-balance object from its pool index.
 * lbi must be a valid, in-use index — only the checking done by
 * pool_elt_at_index applies.
 */
static inline load_balance_t*
load_balance_get (index_t lbi)
{
    return (pool_elt_at_index(load_balance_pool, lbi));
}
223
/** TRUE if all the LB's buckets fit in the in-object inline array
 *  (i.e. on the same cache-line as the object itself) */
#define LB_HAS_INLINE_BUCKETS(_lb)		\
    ((_lb)->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
226
227static inline const dpo_id_t *
228load_balance_get_bucket_i (const load_balance_t *lb,
229 u32 bucket)
230{
231 ASSERT(bucket < lb->lb_n_buckets);
232
233 if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb)))
234 {
235 return (&lb->lb_buckets_inline[bucket]);
236 }
237 else
238 {
239 return (&lb->lb_buckets[bucket]);
240 }
241}
242
/** @brief One-time initialisation of the load-balance module
 *  (implementation in load_balance.c) */
extern void load_balance_module_init(void);
244
245#endif