/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

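/*
 * When CONFIG_MEMCG_KMEM is enabled, every list_lru is kept on a global
 * list so that its per-memcg arrays can be resized when new cgroup kmem
 * ids are allocated (memcg_update_all_list_lrus) and its items moved
 * when a cgroup goes away (memcg_drain_all_list_lrus).
 */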
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to always be present, even on systems
	 * supporting sparse NUMA ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

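/*
 * Resolve the memcg owning a kmem-allocated object via its head page.
 * Returns NULL when kmem accounting is disabled, in which case the
 * caller falls back to the global (per-node) list.
 */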
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

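/*
 * Add @item to the lru. The node is derived from the page backing the
 * item; the per-memcg list is chosen by the object's owning cgroup.
 * Returns true if the item was added, false if it was already on a list.
 */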
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

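/*
 * list_lru_isolate() and list_lru_isolate_move() are meant to be called
 * from a walk callback with the lru lock held: the per-node nr_items
 * counter is adjusted by the walker itself (see __list_lru_walk_one).
 */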
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

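/*
 * Walk one per-memcg list, calling @isolate on each item. The callback
 * may remove the item (LRU_REMOVED), remove it after having dropped and
 * retaken the lru lock (LRU_REMOVED_RETRY), rotate it to the list tail
 * (LRU_ROTATE), skip it (LRU_SKIP), or request a restart of the walk
 * because the lock was dropped (LRU_RETRY).
 */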
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			if (!*nr_to_walk)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* Free everything allocated so far, i.e. slots [begin, i). */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

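/*
 * Grow the per-memcg array: allocate a larger array, initialize only the
 * new slots, copy the old pointers over, publish the new array under the
 * node lock so readers (list_lru_from_memcg_idx) never see a stale
 * pointer, and finally free the old array.
 */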
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

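/*
 * Grow the per-memcg arrays of every registered lru, e.g. after a new
 * kmem cache id has been allocated. On failure, roll back the lrus that
 * were already updated.
 */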
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

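/*
 * Splice all items from the list at @src_idx onto the list at @dst_idx,
 * e.g. to reparent the items of a cgroup that is going away.
 */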
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

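/*
 * memcg_get_cache_ids() blocks concurrent resizing of memcg_nr_cache_ids,
 * so the per-memcg arrays allocated here cannot become stale before the
 * lru is registered on the global list.
 */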
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);