/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
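/*
 * With the common 4K PAGE_SIZE this works out to PAGE_SECTORS_SHIFT == 3
 * and PAGE_SECTORS == 8, i.e. eight 512-byte sectors per backing page.
 */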

/*
 * Each block ramdisk device keeps a radix tree, brd_pages, of the pages
 * that store the block device's contents. A brd page's ->index is its
 * offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

static DEFINE_MUTEX(brd_mutex);

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (i.e. we
	 * don't actually need the rcu_read_lock()), but that is not a
	 * documented feature of the radix-tree API, so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only from deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it into the
 * radix tree, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support DAX and highmem, because our ->direct_access
	 * routine for DAX must return memory that is always addressable.
	 * If DAX was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_RAM_DAX
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
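	/*
	 * An insert failure here means another CPU raced with us and has
	 * already inserted a page at this index (the radix_tree_preload()
	 * above rules out -ENOMEM), so drop our page and use the winner's.
	 */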
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * this will have to change as well.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
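	/*
	 * Callers never pass more than a page at a time (a bio_vec in this
	 * kernel covers at most one page), so the copy can spill into at
	 * most one additional backing page.
	 */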
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
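	/*
	 * brd_make_request() only sends us discards that start on a page
	 * boundary and cover a whole number of pages, so the loop below
	 * never leaves a sub-page tail unhandled.
	 */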
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

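	/*
	 * flush_dcache_page() keeps aliasing D-caches coherent: on a read
	 * we flush after filling the caller's page, on a write we flush
	 * before reading from it.
	 */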
	mem = kmap_atomic(page);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec bvec;
	sector_t sector;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;
	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
		goto io_error;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
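		/*
		 * Only page-aligned discards covering whole pages are
		 * supported; see discard_from_brd().
		 */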
		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
		    bio->bi_iter.bi_size & ~PAGE_MASK)
			goto io_error;
		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, rw, sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
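	/* PAGE_CACHE_SIZE is simply PAGE_SIZE in this kernel generation. */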
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
	page_endio(page, rw & WRITE, err);
	return err;
}

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void __pmem **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOSPC;
	*kaddr = (void __pmem *)page_address(page);
	*pfn = page_to_pfn(page);

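	/*
	 * Only a single page is mapped per call, so PAGE_SIZE is the
	 * number of bytes usable at *kaddr.
	 */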
	return PAGE_SIZE;
}
#else
#define brd_direct_access NULL
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Kill the cache first, so it isn't written back to the
		 * device.
		 *
		 * Another thread might instantiate more buffer cache here,
		 * but there is not much we can do to close that race.
		 */
		kill_bdev(bdev);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.ioctl =		brd_ioctl,
	.direct_access =	brd_direct_access,
};

/*
 * And now the modules code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
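/* e.g. booting with "ramdisk_size=65536" gives 64 MiB per RAM disk. */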
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
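	/*
	 * The tree is modified under brd_lock, where we must not sleep,
	 * hence GFP_ATOMIC for its internal node allocations.
	 */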
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;

	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	/*
	 * Advertise PAGE_SIZE as our physical block size so that fdisk
	 * aligns partitions on 4k boundaries, which the direct_access
	 * API needs because it returns a PFN. (This only matters on very
	 * small devices, <= 4M; above that fdisk aligns on 1M anyway.
	 * Either way the call is harmless.)
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "ram%d", i);
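	/* rd_size is in KiB, capacity in 512-byte sectors, hence the *2. */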
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
	struct brd_device *brd;

	*new = false;
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	/*
	 * The brd module can instantiate the underlying device structures
	 * on demand, provided the corresponding device node is accessed.
	 *
	 * (1) if rd_nr is specified, create that many upfront; otherwise
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT.
	 * (2) Users can create further brd devices by making device nodes
	 *     themselves and having the kernel instantiate the actual
	 *     device on demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

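	/* A max_part of 0 would divide by zero in brd_probe(). */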
	if (unlikely(!max_part))
		max_part = 1;

	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
				  THIS_MODULE, brd_probe, NULL, NULL);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);