/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY NFSDBG_PNFS
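/* How long a failed LAYOUTGET suppresses further attempts for that iomode */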
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *	protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
		iomode == IOMODE_RW ? "RW" : "READ");
}

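/*
 * Return true if LAYOUTGET for this iomode failed within the last
 * PNFS_LAYOUTGET_RETRY_TIMEOUT jiffies; once that window has expired,
 * clear the fail bit so the next LAYOUTGET may be retried.
 */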
static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
			struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *s;

	if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return false;

	list_for_each_entry(s, &lo->plh_segs, pls_list)
		if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;

	return true;
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return false;
	lo->plh_return_iomode = 0;
	pnfs_get_layout_hdr(lo);
	clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
	return true;
}

static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_hdr *lo, struct inode *inode)
{
	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo, lseg)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		stateid = lo->plh_stateid;
		iomode = lo->plh_return_iomode;
		send = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	/* Handle the case where refcount != 1 */
	if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
		return;

	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	/* Do we need a layoutreturn? */
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);

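/*
 * Compute the end offset of a layout range, clamping to NFS4_MAX_UINT64
 * when start + len would overflow u64 (i.e. the range runs to EOF).
 */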
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
			 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
			    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		pnfs_clear_retry_layoutget(lo);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
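/*
 * May drop clp->cl_lock and the RCU read lock (re-taking both) in order
 * to iput() an inode; returns -EAGAIN in that case so that the caller
 * can restart its walk of the server list.
 */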
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							     server,
							     &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
							     server,
							     &layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
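 * E.g. s1 = 1 and s2 = 0xffffffff gives (s32)(s1 - s2) == 2 > 0, so s1
 * is correctly treated as newer despite the u32 wraparound.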
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if LAYOUTGET is currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

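/*
 * Choose the stateid to send in LAYOUTGET: use the open stateid when the
 * layout has no segments (or its stateid has been invalidated), otherwise
 * use the current layout stateid.
 */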
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	do {
		lgp = kzalloc(sizeof(*lgp), gfp_flags);
		if (lgp == NULL)
			return NULL;

		i_size = i_size_read(ino);

		lgp->args.minlength = PAGE_CACHE_SIZE;
		if (lgp->args.minlength > range->length)
			lgp->args.minlength = range->length;
		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
		lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
		lgp->args.range = *range;
		lgp->args.type = server->pnfs_curr_ld->id;
		lgp->args.inode = ino;
		lgp->args.ctx = get_nfs_open_context(ctx);
		lgp->gfp_flags = gfp_flags;
		lgp->cred = lo->plh_lc_cred;

		lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	} while (lseg == ERR_PTR(-EAGAIN));

	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	} else
		pnfs_layout_clear_fail_bit(lo,
				pnfs_iomode_to_fail_bit(range->iomode));

	return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

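/*
 * Clear NFS_LAYOUT_RETURN and wake up anyone waiting on it, both bit
 * waiters and tasks sleeping on the return-on-close RPC waitqueue; the
 * barrier ensures the bit is seen as clear before the wakeups.
 */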
void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode = IOMODE_ANY,
			.offset = 0,
			.length = NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	send = pnfs_prepare_layoutreturn(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

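/*
 * Return-on-close: called at CLOSE time to decide whether the layout can
 * be treated as implicitly returned.  Returns true only when there are
 * ROC segments to give up and no explicit LAYOUTRETURN is being sent.
 */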
bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false, roc = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	stateid = lo->plh_stateid;
	/* always send layoutreturn if being marked so */
	if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
			       &lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo);

	pnfs_clear_retry_layoutget(lo);
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	/* ROC only if both conditions hold:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	if (found && !layoutreturn) {
		/* lo ref dropped in pnfs_roc_release() */
		pnfs_get_layout_hdr(lo);
		roc = true;
	}

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
	return roc;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_clear_layoutreturn_waitbit(lo);
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
	trace_nfs4_layoutreturn_on_close(ino, 0);
}

void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	u32 current_seqid;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* We might not have grabbed a reference to lo, so we need to
	 * check under i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		sleep = true;
	}
	spin_unlock(&ino->i_lock);
	return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
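 *
 * Sort keys, in order: offset (ascending), length (descending), then
 * IOMODE_RW before IOMODE_READ.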
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
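 *
 * Example: with rd_sz = 1MB and rd_io_sz = 64KB both set, a read of a
 * 512KB file goes to the MDS only while the cumulative read_io counter
 * is also still below 64KB; if only one threshold is set, crossing that
 * one alone decides.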
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
{
	if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
		return 1;
	return nfs_wait_bit_killable(key, mode);
}

static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	if (!pnfs_should_retry_layoutget(lo))
		return false;
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   pnfs_layoutget_retry_bit_wait,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (iomode == IOMODE_READ && i_size_read(ino) == 0)
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

lookup_again:
	nfs4_client_recover_expired_lease(clp);
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode) &&
	    !pnfs_should_retry_layoutget(lo))
		goto out_unlock;

	first = list_empty(&lo->plh_segs);
	if (first) {
		/* The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		/* Check to see if the layout for the given range
		 * already exists
		 */
		lseg = pnfs_find_lseg(lo, &arg);
		if (lseg)
			goto out_unlock;
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

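	/* Round the requested range out to page boundaries */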
1608 pg_offset = arg.offset & ~PAGE_CACHE_MASK;
1609 if (pg_offset) {
1610 arg.offset -= pg_offset;
1611 arg.length += pg_offset;
1612 }
1613 if (arg.length != NFS4_MAX_UINT64)
1614 arg.length = PAGE_CACHE_ALIGN(arg.length);
1615
1616 lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
1617 pnfs_clear_retry_layoutget(lo);
1618 atomic_dec(&lo->plh_outstanding);
1619out_put_layout_hdr:
1620 if (first)
1621 pnfs_clear_first_layoutget(lo);
1622 pnfs_put_layout_hdr(lo);
1623out:
1624 dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1625 "(%s, offset: %llu, length: %llu)\n",
1626 __func__, ino->i_sb->s_id,
1627 (unsigned long long)NFS_FILEID(ino),
1628 lseg == NULL ? "not found" : "found",
1629 iomode==IOMODE_RW ? "read/write" : "read-only",
1630 (unsigned long long)pos,
1631 (unsigned long long)count);
1632 return lseg;
1633out_unlock:
1634 spin_unlock(&ino->i_lock);
1635 goto out_put_layout_hdr;
1636}
1637EXPORT_SYMBOL_GPL(pnfs_update_layout);
1638
1639static bool
1640pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
1641{
1642 switch (range->iomode) {
1643 case IOMODE_READ:
1644 case IOMODE_RW:
1645 break;
1646 default:
1647 return false;
1648 }
1649 if (range->offset == NFS4_MAX_UINT64)
1650 return false;
1651 if (range->length == 0)
1652 return false;
1653 if (range->length != NFS4_MAX_UINT64 &&
1654 range->length > NFS4_MAX_UINT64 - range->offset)
1655 return false;
1656 return true;
1657}
1658
1659struct pnfs_layout_segment *
1660pnfs_layout_process(struct nfs4_layoutget *lgp)
1661{
1662 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1663 struct nfs4_layoutget_res *res = &lgp->res;
1664 struct pnfs_layout_segment *lseg;
1665 struct inode *ino = lo->plh_inode;
1666 LIST_HEAD(free_me);
1667 int status = -EINVAL;
1668
1669 if (!pnfs_sanity_check_layout_range(&res->range))
1670 goto out;
1671
1672 /* Inject layout blob into I/O device driver */
1673 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1674 if (!lseg || IS_ERR(lseg)) {
1675 if (!lseg)
1676 status = -ENOMEM;
1677 else
1678 status = PTR_ERR(lseg);
1679 dprintk("%s: Could not allocate layout: error %d\n",
1680 __func__, status);
1681 goto out;
1682 }
1683
1684 init_lseg(lo, lseg);
1685 lseg->pls_range = res->range;
1686
1687 spin_lock(&ino->i_lock);
1688 if (pnfs_layoutgets_blocked(lo)) {
1689 dprintk("%s forget reply due to state\n", __func__);
1690 goto out_forget_reply;
1691 }
1692
1693 if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
1694 /* existing state ID, make sure the sequence number matches. */
1695 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1696 dprintk("%s forget reply due to sequence\n", __func__);
1697 status = -EAGAIN;
1698 goto out_forget_reply;
1699 }
1700 pnfs_set_layout_stateid(lo, &res->stateid, false);
1701 } else {
1702 /*
1703 * We got an entirely new state ID. Mark all segments for the
1704 * inode invalid, and don't bother validating the stateid
1705 * sequence number.
1706 */
1707 pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);
1708
1709 nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
1710 lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
1711 }
1712
1713 clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1714
1715 pnfs_get_lseg(lseg);
1716 pnfs_layout_insert_lseg(lo, lseg, &free_me);
1717
1718 if (res->return_on_close)
1719 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1720
1721 spin_unlock(&ino->i_lock);
1722 pnfs_free_lseg_list(&free_me);
1723 return lseg;
1724out:
1725 return ERR_PTR(status);
1726
1727out_forget_reply:
1728 spin_unlock(&ino->i_lock);
1729 lseg->pls_layout = lo;
1730 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1731 goto out;
1732}
1733
1734static void
1735pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
1736 struct list_head *tmp_list,
1737 struct pnfs_layout_range *return_range)
1738{
1739 struct pnfs_layout_segment *lseg, *next;
1740
1741 dprintk("%s:Begin lo %p\n", __func__, lo);
1742
1743 if (list_empty(&lo->plh_segs))
1744 return;
1745
1746 list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
1747 if (should_free_lseg(&lseg->pls_range, return_range)) {
1748 dprintk("%s: marking lseg %p iomode %d "
1749 "offset %llu length %llu\n", __func__,
1750 lseg, lseg->pls_range.iomode,
1751 lseg->pls_range.offset,
1752 lseg->pls_range.length);
1753 set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
1754 mark_lseg_invalid(lseg, tmp_list);
1755 set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1756 &lo->plh_flags);
1757 }
1758}
1759
1760void pnfs_error_mark_layout_for_return(struct inode *inode,
1761 struct pnfs_layout_segment *lseg)
1762{
1763 struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
1764 int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode);
1765 struct pnfs_layout_range range = {
1766 .iomode = lseg->pls_range.iomode,
1767 .offset = 0,
1768 .length = NFS4_MAX_UINT64,
1769 };
1770 LIST_HEAD(free_me);
1771
1772 spin_lock(&inode->i_lock);
1773 /* set failure bit so that pnfs path will be retried later */
1774 pnfs_layout_set_fail_bit(lo, iomode);
1775 if (lo->plh_return_iomode == 0)
1776 lo->plh_return_iomode = range.iomode;
1777 else if (lo->plh_return_iomode != range.iomode)
1778 lo->plh_return_iomode = IOMODE_ANY;
1779 /*
1780 * mark all matching lsegs so that we are sure to have no live
1781 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
1782 * for how it works.
1783 */
1784 pnfs_mark_matching_lsegs_return(lo, &free_me, &range);
1785 spin_unlock(&inode->i_lock);
1786 pnfs_free_lseg_list(&free_me);
1787}
1788EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
1789
1790void
1791pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
1792{
1793 u64 rd_size = req->wb_bytes;
1794
1795 if (pgio->pg_lseg == NULL) {
1796 if (pgio->pg_dreq == NULL)
1797 rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
1798 else
1799 rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
1800
1801 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1802 req->wb_context,
1803 req_offset(req),
1804 rd_size,
1805 IOMODE_READ,
1806 GFP_KERNEL);
1807 }
1808 /* If no lseg, fall back to read through mds */
1809 if (pgio->pg_lseg == NULL)
1810 nfs_pageio_reset_read_mds(pgio);
1811
1812}
1813EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
1814
1815void
1816pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
1817 struct nfs_page *req, u64 wb_size)
1818{
1819 if (pgio->pg_lseg == NULL)
1820 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1821 req->wb_context,
1822 req_offset(req),
1823 wb_size,
1824 IOMODE_RW,
1825 GFP_NOFS);
1826 /* If no lseg, fall back to write through mds */
1827 if (pgio->pg_lseg == NULL)
1828 nfs_pageio_reset_write_mds(pgio);
1829}
1830EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1831
1832void
1833pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
1834{
1835 if (desc->pg_lseg) {
1836 pnfs_put_lseg(desc->pg_lseg);
1837 desc->pg_lseg = NULL;
1838 }
1839}
1840EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
1841
1842/*
1843 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
1844 * of bytes (maximum @req->wb_bytes) that can be coalesced.
1845 */
1846size_t
1847pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
1848 struct nfs_page *prev, struct nfs_page *req)
1849{
1850 unsigned int size;
1851 u64 seg_end, req_start, seg_left;
1852
1853 size = nfs_generic_pg_test(pgio, prev, req);
1854 if (!size)
1855 return 0;
1856
1857 /*
1858 * 'size' contains the number of bytes left in the current page (up
1859 * to the original size asked for in @req->wb_bytes).
1860 *
1861 * Calculate how many bytes are left in the layout segment
1862 * and if there are less bytes than 'size', return that instead.
1863 *
1864 * Please also note that 'end_offset' is actually the offset of the
1865 * first byte that lies outside the pnfs_layout_range. FIXME?
1866 *
1867 */
1868 if (pgio->pg_lseg) {
1869 seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
1870 pgio->pg_lseg->pls_range.length);
1871 req_start = req_offset(req);
1872 WARN_ON_ONCE(req_start >= seg_end);
1873 /* start of request is past the last byte of this segment */
1874 if (req_start >= seg_end) {
1875 /* reference the new lseg */
1876 if (pgio->pg_ops->pg_cleanup)
1877 pgio->pg_ops->pg_cleanup(pgio);
1878 if (pgio->pg_ops->pg_init)
1879 pgio->pg_ops->pg_init(pgio, req);
1880 return 0;
1881 }
1882
1883 /* adjust 'size' iff there are fewer bytes left in the
1884 * segment than what nfs_generic_pg_test returned */
1885 seg_left = seg_end - req_start;
1886 if (seg_left < size)
1887 size = (unsigned int)seg_left;
1888 }
1889
1890 return size;
1891}
1892EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1893
1894int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
1895{
1896 struct nfs_pageio_descriptor pgio;
1897
1898 /* Resend all requests through the MDS */
1899 nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
1900 hdr->completion_ops);
1901 set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
1902 return nfs_pageio_resend(&pgio, hdr);
1903}
1904EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1905
1906static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
1907{
1908
1909 dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1910 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1911 PNFS_LAYOUTRET_ON_ERROR) {
1912 pnfs_return_layout(hdr->inode);
1913 }
1914 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1915 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
1916}
1917
1918/*
1919 * Called by non rpc-based layout drivers
1920 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				      hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
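
/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * a non-RPC driver finishes its own I/O, records any failure in
 * hdr->pnfs_error, and hands the header to pnfs_ld_write_done(), which
 * performs the layoutcommit bookkeeping, error handling and release
 * shown above.
 */
static void examplelayout_write_complete(struct nfs_pgio_header *hdr, int error)
{
	if (unlikely(error))
		hdr->pnfs_error = error;	/* triggers resend through the MDS */
	else
		hdr->res.count = hdr->args.count;	/* everything was written */
	pnfs_ld_write_done(hdr);
}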

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->release(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;
	int err = 0;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		err = pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
		pnfs_read_through_mds(desc, hdr);
}

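/*
 * Illustrative sketch of the ->read_pagelist() contract consumed by
 * pnfs_try_to_read_data()/pnfs_do_read() above (all "examplelayout"
 * names are hypothetical): return PNFS_ATTEMPTED once the driver owns
 * the I/O, PNFS_NOT_ATTEMPTED to punt the pages back to the MDS, or
 * PNFS_TRY_AGAIN to have them re-driven through pNFS.
 */
static enum pnfs_try_status
examplelayout_read_pagelist(struct nfs_pgio_header *hdr)
{
	if (!examplelayout_device_ready(hdr->lseg))	/* hypothetical check */
		return PNFS_NOT_ATTEMPTED;		/* fall back to the MDS */
	examplelayout_submit_read(hdr);			/* hypothetical async submit */
	return PNFS_ATTEMPTED;	/* driver will call pnfs_ld_read_done() */
}
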
static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		      loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu\n",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* If pnfs_layoutcommit_inode() runs between the unlock above and the
	 * dirtying below, the subsequent layoutcommit will be a no-op because
	 * NFS_INO_LAYOUTCOMMIT will no longer be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and either a COMMIT is sent to a data server or WRITEs to a
 * data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
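
/*
 * Illustrative sketch, closely modeled on the NFSv4 ->write_inode path:
 * writeback is the usual trigger for pnfs_layoutcommit_inode(), with
 * WB_SYNC_ALL deciding whether the LAYOUTCOMMIT must be synchronous.
 * Treat this as an assumption about the caller, not part of this file.
 */
static int example_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	int ret = nfs_write_inode(inode, wbc);

	if (ret == 0)
		ret = pnfs_layoutcommit_inode(inode,
				wbc->sync_mode == WB_SYNC_ALL);
	return ret;
}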

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);
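
/*
 * Illustrative only: a driver that wants fsync() to force a LAYOUTCOMMIT
 * points its layoutdriver ->sync at the helper above, roughly as the
 * in-tree block layout does. Everything here other than the generic
 * helpers is a hypothetical name.
 */
static struct pnfs_layoutdriver_type examplelayout_type = {
	.name		= "LAYOUT_EXAMPLE",
	.owner		= THIS_MODULE,
	.read_pagelist	= examplelayout_read_pagelist,	/* sketch above */
	.write_pagelist	= examplelayout_write_pagelist,	/* hypothetical */
	.sync		= pnfs_generic_sync,
};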

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

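/*
 * Interval override for LAYOUTSTATS reporting. Assumed (from the
 * flexfiles driver's use of this knob) to be in seconds, with 0
 * selecting the layout driver's built-in default.
 */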
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);