#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

struct netns_frags {
	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_t mem ____cacheline_aligned_in_smp;
	/* sysctls */
	int timeout;
	int high_thresh;
	int low_thresh;
};
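
/*
 * Not part of the original header: the three sysctls above are filled in
 * per namespace by the owning protocol before any fragments are queued.
 * A minimal sketch with purely illustrative values (real defaults differ
 * by protocol and kernel version):
 *
 *	nf->high_thresh = 4 * 1024 * 1024;	refuse new queues above this
 *	nf->low_thresh  = 3 * 1024 * 1024;	evict down to this level
 *	nf->timeout     = 30 * HZ;		per-queue reassembly timeout, in jiffies
 */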

/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 */
enum {
	INET_FRAG_FIRST_IN = BIT(0),
	INET_FRAG_LAST_IN = BIT(1),
	INET_FRAG_COMPLETE = BIT(2),
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @lock: spinlock protecting the queue
 * @timer: queue expiration timer
 * @list: hash bucket list
 * @refcnt: reference count of the queue
 * @fragments: received fragments head
 * @fragments_tail: received fragments tail
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @net: namespace that this frag belongs to
 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
 */
struct inet_frag_queue {
	spinlock_t lock;
	struct timer_list timer;
	struct hlist_node list;
	atomic_t refcnt;
	struct sk_buff *fragments;
	struct sk_buff *fragments_tail;
	ktime_t stamp;
	int len;
	int meat;
	__u8 flags;
	u16 max_size;
	struct netns_frags *net;
	struct hlist_node list_evictor;
};
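
/*
 * Not part of the original header: an illustrative helper showing how the
 * fields above fit together.  Reassembly is typically treated as complete
 * once both the first and last fragments have been seen and the bytes
 * received so far (@meat) cover the whole original datagram (@len).  This
 * is a minimal sketch of that check, not the kernel's own API.
 */
static inline bool inet_frag_queue_complete_sketch(const struct inet_frag_queue *q)
{
	return (q->flags & INET_FRAG_FIRST_IN) &&
	       (q->flags & INET_FRAG_LAST_IN) &&
	       q->meat == q->len;
}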

#define INETFRAGS_HASHSZ 1024

/* averaged:
 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
 *	       (SKB_TRUESIZE(0) + sizeof(struct ipq or struct frag_queue)),
 *	       rounded up
 */
#define INETFRAGS_MAXDEPTH 128

struct inet_frag_bucket {
	struct hlist_head chain;
	spinlock_t chain_lock;
};

struct inet_frags {
	struct inet_frag_bucket hash[INETFRAGS_HASHSZ];

	struct work_struct frags_work;
	unsigned int next_bucket;
	unsigned long last_rebuild_jiffies;
	bool rebuild;

	/* The first call to hashfn is responsible for initializing
	 * rnd. This is best done with net_get_random_once.
	 *
	 * rnd_seqlock is used to let hash insertion detect
	 * when it needs to re-lookup the hash chain to use.
	 */
	u32 rnd;
	seqlock_t rnd_seqlock;
	int qsize;

	unsigned int (*hashfn)(const struct inet_frag_queue *);
	bool (*match)(const struct inet_frag_queue *q,
		      const void *arg);
	void (*constructor)(struct inet_frag_queue *q,
			    const void *arg);
	void (*destructor)(struct inet_frag_queue *);
	void (*skb_free)(struct sk_buff *);
	void (*frag_expire)(unsigned long data);
	struct kmem_cache *frags_cachep;
	const char *frags_cache_name;
};
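
/*
 * Not part of the original header: a protocol's @hashfn typically mixes its
 * own key fields with @rnd, initialising @rnd lazily via net_get_random_once()
 * as the comment above describes.  Hypothetical sketch (struct my_frag_queue
 * and my_frags are illustrative names, not real kernel symbols; jhash_3words()
 * is from <linux/jhash.h>):
 *
 *	static unsigned int my_hashfn(const struct inet_frag_queue *q)
 *	{
 *		const struct my_frag_queue *fq =
 *			container_of(q, struct my_frag_queue, q);
 *
 *		net_get_random_once(&my_frags.rnd, sizeof(my_frags.rnd));
 *		return jhash_3words(fq->saddr, fq->daddr,
 *				    (u32)fq->id << 16 | fq->protocol,
 *				    my_frags.rnd);
 *	}
 */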

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

static inline void inet_frags_init_net(struct netns_frags *nf)
{
	atomic_set(&nf->mem, 0);
}
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
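
/*
 * Not part of the original header: a sketch of the typical registration flow
 * for a hypothetical protocol (all my_* names are illustrative).  The
 * callbacks and qsize are filled in once, then inet_frags_init() sets up the
 * hash table and kmem cache; each namespace additionally calls
 * inet_frags_init_net() on its own netns_frags and inet_frags_exit_net() on
 * teardown:
 *
 *	static struct inet_frags my_frags;
 *
 *	static int __init my_frag_module_init(void)
 *	{
 *		my_frags.hashfn           = my_hashfn;
 *		my_frags.match            = my_frag_match;
 *		my_frags.constructor      = my_frag_constructor;
 *		my_frags.destructor       = NULL;
 *		my_frags.skb_free         = NULL;
 *		my_frags.frag_expire      = my_frag_expire;
 *		my_frags.qsize            = sizeof(struct my_frag_queue);
 *		my_frags.frags_cache_name = "my_frag_queues";
 *		return inet_frags_init(&my_frags);
 *	}
 */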

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix);
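
/*
 * Not part of the original header: a sketch of the usual lookup pattern.
 * inet_frag_find() returns an existing queue or creates a new one, and
 * inet_frag_maybe_warn_overflow() emits a rate-limited message when a hash
 * chain grows past INETFRAGS_MAXDEPTH.  my_* names are illustrative:
 *
 *	struct inet_frag_queue *q;
 *
 *	q = inet_frag_find(nf, &my_frags, &key, my_key_hashfn(&key));
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, "my_proto: ");
 *		return NULL;
 *	}
 *	return container_of(q, struct my_frag_queue, q);
 */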

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}

static inline bool inet_frag_evicting(struct inet_frag_queue *q)
{
	return !hlist_unhashed(&q->list_evictor);
}

/* Memory Tracking Functions. */

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
	atomic_sub(i, &nf->mem);
}

static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
	atomic_add(i, &nf->mem);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	return atomic_read(&nf->mem);
}
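
/*
 * Not part of the original header: how the accounting above is typically
 * used.  Each queued fragment charges its skb->truesize (the queue structure
 * itself is charged by the core when the queue is allocated) against the
 * per-namespace budget, and the same amount is released when the fragment is
 * freed or the datagram is reassembled:
 *
 *	add_frag_mem_limit(q->net, skb->truesize);	when queueing a fragment
 *	...
 *	sub_frag_mem_limit(q->net, skb->truesize);	when releasing it
 *
 * New queues are typically refused once frag_mem_limit() exceeds high_thresh,
 * and the eviction worker then frees queues until usage drops back under
 * low_thresh.
 */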

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
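
/*
 * Not part of the original header: each fragment's ECN codepoint is mapped to
 * exactly one of the IPFRAG_ECN_* bits above, so OR-ing the per-fragment
 * values in ipq->ecn makes invalid mixes easy to spot.  A minimal sketch of
 * that mapping (INET_ECN_MASK comes from <net/inet_ecn.h>):
 *
 *	static inline u8 frag_ecn_bit(u8 tos)
 *	{
 *		return 1 << (tos & INET_ECN_MASK);
 *	}
 *
 * ip_frag_ecn_table[] is then indexed by the OR of all fragments' bits to
 * select the ECN value for the reassembled datagram, or to flag the
 * combination as invalid.
 */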

#endif