/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	struct xfs_log_vec	*old_lv,
	int			*diff_len,
	int			*diff_iovecs)
{
	/* Account for the new LV being passed in */
	if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
		*diff_len += lv->lv_bytes;
		*diff_iovecs += lv->lv_niovecs;
	}
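	/* ordered log vectors carry no data and so consume no log space */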

	/*
	 * If there is no old LV, this is the first time we've seen the item in
	 * this CIL context and so we need to pin it. If we are replacing the
	 * old_lv, then remove the space it accounts for and free it.
	 */
	if (!old_lv)
		lv->lv_item->li_ops->iop_pin(lv->lv_item);
	else if (old_lv != lv) {
		ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

		*diff_len -= old_lv->lv_bytes;
		*diff_iovecs -= old_lv->lv_niovecs;
		kmem_free(old_lv);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format log items into flat buffers
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, which is then inserted into the
 * Committed Item List for tracking until the next checkpoint is written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of the
 * item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point
 * to the copied region inside the buffer we just allocated. This allows us to
 * format the regions into the iclog as though they are being formatted
 * directly out of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
	struct xlog		*log,
	struct xfs_trans	*tp,
	int			*diff_len,
	int			*diff_iovecs)
{
	struct xfs_log_item_desc *lidp;

	/* Bail out if we didn't find a log item. */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		struct xfs_log_vec *old_lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/* Skip items that do not have any vectors for writing */
		if (!niovecs)
			continue;

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/*
		 * We 64-bit align the length of each iovec so that the start
		 * of the next one is naturally aligned. We'll need to
		 * account for that slack space here. Then round nbytes up
		 * to 64-bit alignment so that the initial buffer alignment is
		 * easy to calculate and verify.
		 */
		nbytes += niovecs * sizeof(uint64_t);
		nbytes = round_up(nbytes, sizeof(uint64_t));
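		/* e.g. two 13-byte iovecs: 26 + 2 * 8 = 42, rounded up to 48 */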

		/* grab the old item if it exists for reservation accounting */
		old_lv = lip->li_lv;

		/*
		 * The data buffer needs to start 64-bit aligned, so round up
		 * that space to ensure we can align it appropriately and not
		 * overrun the buffer.
		 */
		buf_size = nbytes +
			   round_up((sizeof(struct xfs_log_vec) +
				     niovecs * sizeof(struct xfs_log_iovec)),
				    sizeof(uint64_t));

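		/*
		 * The resulting allocation is laid out as:
		 *
		 *   [ xfs_log_vec | iovec array | 64-bit pad | data buffer ]
		 *
		 * where lv_iovecp will point just past the struct and lv_buf
		 * at the 64-bit aligned start of the data region.
		 */
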
		/* compare to existing item size */
		if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
			/* same or smaller, optimise common overwrite case */
			lv = lip->li_lv;
			lv->lv_next = NULL;

			if (ordered)
				goto insert;

			/*
			 * set the item up as though it is a new insertion so
			 * that the space reservation accounting is correct.
			 */
			*diff_iovecs -= lv->lv_niovecs;
			*diff_len -= lv->lv_bytes;
		} else {
			/* allocate new data chunk */
			lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
			lv->lv_item = lip;
			lv->lv_size = buf_size;
			if (ordered) {
				/* track as an ordered logvec */
				ASSERT(lip->li_lv == NULL);
				lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
				goto insert;
			}
			lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
		}

		/* Ensure the lv is set up according to ->iop_size */
		lv->lv_niovecs = niovecs;

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf_len = 0;
		lv->lv_bytes = 0;
		lv->lv_buf = (char *)lv + buf_size - nbytes;
		ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));

		lip->li_ops->iop_format(lip, lv);
insert:
		ASSERT(lv->lv_buf_len <= nbytes);
		xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
	}
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_trans	*tp)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_item_desc *lidp;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(tp);

	/*
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

	/*
	 * Now (re-)position everything modified at the tail of the CIL.
	 * We do this here so we only need to take the CIL lock once during
	 * the transaction commit.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/*
		 * Only move the item if it isn't already at the tail. This is
		 * to prevent a transient list_empty() state when reinserting
		 * an item that is already the only item in the CIL.
		 */
		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
			list_move_tail(&lip->li_cil, &cil->xc_cil);
	}

	/* account for space used by new iovec headers */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;

	/* attach the transaction to the CIL if it has any busy extents */
	if (!list_empty(&tp->t_busy))
		list_splice_init(&tp->t_busy, &ctx->busy_extents);

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

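		/* ceiling of len / iclog_space: extra record headers needed */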
		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		tp->t_ticket->t_curr_res -= hdrs;
		ASSERT(tp->t_ticket->t_curr_res >= len);
	}
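	/* remove the space we just consumed from the transaction's ticket */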
	tp->t_ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}

static void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	/*
	 * If we are aborting the commit, wake up anyone waiting on the
	 * committing list. If we don't, then on a shutdown we can leave
	 * processes stuck in xlog_cil_force_lsn() waiting on a sequence commit that
	 * will never happen because we aborted it.
	 */
	spin_lock(&ctx->cil->xc_push_lock);
	if (abort)
		wake_up_all(&ctx->cil->xc_commit_wait);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_push_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it is a
 * background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_push_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence) {
		spin_unlock(&cil->xc_push_lock);
		goto out_skip;
	}

	/*
	 * We are now going to push this context, so add it to the committing
	 * list before we do anything else. This ensures that anyone waiting on
	 * this push can easily detect the difference between a "push in
	 * progress" and "CIL is empty, nothing to do".
	 *
	 * IOWs, a wait loop can now check for:
	 *	the current sequence not being found on the committing list;
	 *	an empty CIL; and
	 *	an unchanged sequence number
	 * to detect a push that had nothing to do and therefore does not need
	 * waiting on. If the CIL is not empty, we get put on the committing
	 * list before emptying the CIL and bumping the sequence number. Hence
	 * an empty CIL and an unchanged sequence number means we jumped out
	 * above after doing nothing.
	 *
	 * Hence the waiter will either find the commit sequence on the
	 * committing list or the sequence number will be unchanged and the CIL
	 * still dirty. In that latter case, the push has not yet started, and
	 * so the waiter will have to continue trying to check the CIL
	 * committing list until it is found. In extreme cases of delay, the
	 * sequence may fully commit between the attempts the waiter makes to
	 * wait on the commit sequence.
	 */
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_push_lock);

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 *
	 * xfs_log_force_lsn requires us to mirror the new sequence into the cil
	 * structure atomically with the addition of this sequence to the
	 * committing list. This also ensures that we can do unlocked checks
	 * against the current sequence in log forces without risking
	 * dereferencing a freed context pointer.
	 */
	spin_lock(&cil->xc_push_lock);
	cil->xc_current_sequence = new_ctx->sequence;
	spin_unlock(&cil->xc_push_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

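	/* chain the transaction header in front of the checkpoint log vectors */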
	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log)) {
			spin_unlock(&cil->xc_push_lock);
			goto out_abort_free_ticket;
		}

		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_push_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_push_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_push_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return -EIO;
}

static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil *cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}

/*
 * We need to push the CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The CIL won't be empty because we are called while holding the
	 * context lock, so whatever we added to the CIL will still be there.
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

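	/* only queue a push if one is not already pending for this sequence */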
	spin_lock(&cil->xc_push_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_push_lock);
}

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_push_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;
	bool		empty = false;

	spin_lock(&cil->xc_push_lock);
	if (list_empty(&cil->xc_cil))
		empty = true;
	spin_unlock(&cil->xc_push_lock);
	return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_lsn_t		*commit_lsn,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	struct xfs_cil		*cil = log->l_cilp;

	/* lock out background commit */
	down_read(&cil->xc_ctx_lock);

	xlog_cil_insert_items(log, tp);

	/* check we didn't blow the reservation */
	if (tp->t_ticket->t_curr_res < 0)
		xlog_print_tic_res(mp, tp->t_ticket);

	tp->t_commit_lsn = cil->xc_ctx->sequence;
	if (commit_lsn)
		*commit_lsn = tp->t_commit_lsn;

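	/* release the unused portion of the transaction's log reservation */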
	xfs_log_done(mp, tp->t_ticket, NULL, regrant);
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	xfs_trans_free_items(tp, tp->t_commit_lsn, false);

	xlog_cil_push_background(log);

	up_read(&cil->xc_ctx_lock);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we have got here our sequence may not have been
	 * pushed yet. This is true if the current sequence still matches the
	 * push sequence after the above wait loop and the CIL still contains
	 * dirty objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;
	return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}