Changes in uspace/lib/libblock/libblock.c [d68e4d5:0da4e41] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
uspace/lib/libblock/libblock.c
rd68e4d5 r0da4e41 50 50 #include <adt/list.h> 51 51 #include <adt/hash_table.h> 52 #include <macros.h> 52 53 #include <mem.h> 53 54 … … 62 63 typedef struct { 63 64 fibril_mutex_t lock; 64 size_t block_size; /**< Block size. */65 size_t lblock_size; /**< Logical block size. */ 65 66 unsigned block_count; /**< Total number of blocks. */ 66 67 unsigned blocks_cached; /**< Number of cached blocks. */ … … 74 75 dev_handle_t dev_handle; 75 76 int dev_phone; 76 fibril_mutex_t com _area_lock;77 void *com _area;78 size_t com _size;77 fibril_mutex_t comm_area_lock; 78 void *comm_area; 79 size_t comm_size; 79 80 void *bb_buf; 80 off_t bb_off;81 size_t bb_size;81 bn_t bb_addr; 82 size_t pblock_size; /**< Physical block size. */ 82 83 cache_t *cache; 83 84 } devcon_t; 84 85 85 static int read_block(devcon_t *devcon, bn_t boff, size_t block_size); 86 static int write_block(devcon_t *devcon, bn_t boff, size_t block_size); 86 static int read_blocks(devcon_t *devcon, bn_t ba, size_t cnt); 87 static int write_blocks(devcon_t *devcon, bn_t ba, size_t cnt); 88 static int get_block_size(int dev_phone, size_t *bsize); 87 89 88 90 static devcon_t *devcon_search(dev_handle_t dev_handle) … … 102 104 } 103 105 104 static int devcon_add(dev_handle_t dev_handle, int dev_phone, void *com_area,105 size_t com_size)106 static int devcon_add(dev_handle_t dev_handle, int dev_phone, size_t bsize, 107 void *comm_area, size_t comm_size) 106 108 { 107 109 link_t *cur; 108 110 devcon_t *devcon; 111 112 if (comm_size < bsize) 113 return EINVAL; 109 114 110 115 devcon = malloc(sizeof(devcon_t)); … … 115 120 devcon->dev_handle = dev_handle; 116 121 devcon->dev_phone = dev_phone; 117 fibril_mutex_initialize(&devcon->com _area_lock);118 devcon->com _area = com_area;119 devcon->com _size = com_size;122 fibril_mutex_initialize(&devcon->comm_area_lock); 123 devcon->comm_area = comm_area; 124 devcon->comm_size = comm_size; 120 125 devcon->bb_buf = NULL; 121 devcon->bb_ off= 0;122 devcon-> bb_size = 0;126 
devcon->bb_addr = 0; 127 devcon->pblock_size = bsize; 123 128 devcon->cache = NULL; 124 129 … … 144 149 } 145 150 146 int block_init(dev_handle_t dev_handle, size_t com _size)151 int block_init(dev_handle_t dev_handle, size_t comm_size) 147 152 { 148 153 int rc; 149 154 int dev_phone; 150 void *com_area; 151 152 com_area = mmap(NULL, com_size, PROTO_READ | PROTO_WRITE, 155 void *comm_area; 156 size_t bsize; 157 158 comm_area = mmap(NULL, comm_size, PROTO_READ | PROTO_WRITE, 153 159 MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); 154 if (!com _area) {160 if (!comm_area) { 155 161 return ENOMEM; 156 162 } … … 158 164 dev_phone = devmap_device_connect(dev_handle, IPC_FLAG_BLOCKING); 159 165 if (dev_phone < 0) { 160 munmap(com _area, com_size);166 munmap(comm_area, comm_size); 161 167 return dev_phone; 162 168 } 163 169 164 rc = ipc_share_out_start(dev_phone, com_area,170 rc = async_share_out_start(dev_phone, comm_area, 165 171 AS_AREA_READ | AS_AREA_WRITE); 166 172 if (rc != EOK) { 167 munmap(com _area, com_size);173 munmap(comm_area, comm_size); 168 174 ipc_hangup(dev_phone); 169 175 return rc; 170 176 } 171 172 rc = devcon_add(dev_handle, dev_phone, com_area, com_size); 177 178 if (get_block_size(dev_phone, &bsize) != EOK) { 179 munmap(comm_area, comm_size); 180 ipc_hangup(dev_phone); 181 return rc; 182 } 183 184 rc = devcon_add(dev_handle, dev_phone, bsize, comm_area, comm_size); 173 185 if (rc != EOK) { 174 munmap(com _area, com_size);186 munmap(comm_area, comm_size); 175 187 ipc_hangup(dev_phone); 176 188 return rc; … … 195 207 } 196 208 197 munmap(devcon->com _area, devcon->com_size);209 munmap(devcon->comm_area, devcon->comm_size); 198 210 ipc_hangup(devcon->dev_phone); 199 211 … … 201 213 } 202 214 203 int block_bb_read(dev_handle_t dev_handle, off_t off, size_t size)215 int block_bb_read(dev_handle_t dev_handle, bn_t ba) 204 216 { 205 217 void *bb_buf; … … 211 223 if (devcon->bb_buf) 212 224 return EEXIST; 213 bb_buf = malloc( size);225 bb_buf = 
malloc(devcon->pblock_size); 214 226 if (!bb_buf) 215 227 return ENOMEM; 216 217 fibril_mutex_lock(&devcon->com _area_lock);218 rc = read_block (devcon, 0, size);228 229 fibril_mutex_lock(&devcon->comm_area_lock); 230 rc = read_blocks(devcon, 0, 1); 219 231 if (rc != EOK) { 220 fibril_mutex_unlock(&devcon->com _area_lock);232 fibril_mutex_unlock(&devcon->comm_area_lock); 221 233 free(bb_buf); 222 234 return rc; 223 235 } 224 memcpy(bb_buf, devcon->com _area,size);225 fibril_mutex_unlock(&devcon->com _area_lock);236 memcpy(bb_buf, devcon->comm_area, devcon->pblock_size); 237 fibril_mutex_unlock(&devcon->comm_area_lock); 226 238 227 239 devcon->bb_buf = bb_buf; 228 devcon->bb_off = off; 229 devcon->bb_size = size; 240 devcon->bb_addr = ba; 230 241 231 242 return EOK; … … 275 286 fibril_mutex_initialize(&cache->lock); 276 287 list_initialize(&cache->free_head); 277 cache-> block_size = size;288 cache->lblock_size = size; 278 289 cache->block_count = blocks; 279 290 cache->blocks_cached = 0; 280 291 cache->mode = mode; 292 293 /* No block size translation a.t.m. */ 294 assert(cache->lblock_size == devcon->pblock_size); 281 295 282 296 if (!hash_table_create(&cache->block_hash, CACHE_BUCKETS, 1, … … 306 320 b->refcnt = 1; 307 321 b->dirty = false; 322 b->toxic = false; 308 323 fibril_rwlock_initialize(&b->contents_lock); 309 324 link_initialize(&b->free_link); … … 313 328 /** Instantiate a block in memory and get a reference to it. 314 329 * 330 * @param block Pointer to where the function will store the 331 * block pointer on success. 315 332 * @param dev_handle Device handle of the block device. 316 333 * @param boff Block offset. … … 319 336 * device. 320 337 * 321 * @return Block structure.322 */ 323 block_t *block_get(dev_handle_t dev_handle, bn_t boff, int flags)338 * @return EOK on success or a negative error code. 
339 */ 340 int block_get(block_t **block, dev_handle_t dev_handle, bn_t boff, int flags) 324 341 { 325 342 devcon_t *devcon; … … 328 345 link_t *l; 329 346 unsigned long key = boff; 330 bn_t oboff;347 int rc; 331 348 332 349 devcon = devcon_search(dev_handle); … … 336 353 337 354 cache = devcon->cache; 355 356 retry: 357 rc = EOK; 358 b = NULL; 359 338 360 fibril_mutex_lock(&cache->lock); 339 361 l = hash_table_find(&cache->block_hash, &key); … … 346 368 if (b->refcnt++ == 0) 347 369 list_remove(&b->free_link); 370 if (b->toxic) 371 rc = EIO; 348 372 fibril_mutex_unlock(&b->lock); 349 373 fibril_mutex_unlock(&cache->lock); … … 352 376 * The block was not found in the cache. 353 377 */ 354 int rc;355 bool sync = false;356 357 378 if (cache_can_grow(cache)) { 358 379 /* … … 364 385 if (!b) 365 386 goto recycle; 366 b->data = malloc(cache-> block_size);387 b->data = malloc(cache->lblock_size); 367 388 if (!b->data) { 368 389 free(b); … … 376 397 unsigned long temp_key; 377 398 recycle: 378 assert(!list_empty(&cache->free_head)); 399 if (list_empty(&cache->free_head)) { 400 fibril_mutex_unlock(&cache->lock); 401 rc = ENOMEM; 402 goto out; 403 } 379 404 l = cache->free_head.next; 380 list_remove(l);381 405 b = list_get_instance(l, block_t, free_link); 382 sync = b->dirty; 383 oboff = b->boff; 406 407 fibril_mutex_lock(&b->lock); 408 if (b->dirty) { 409 /* 410 * The block needs to be written back to the 411 * device before it changes identity. Do this 412 * while not holding the cache lock so that 413 * concurrency is not impeded. Also move the 414 * block to the end of the free list so that we 415 * do not slow down other instances of 416 * block_get() draining the free list. 
417 */ 418 list_remove(&b->free_link); 419 list_append(&b->free_link, &cache->free_head); 420 fibril_mutex_unlock(&cache->lock); 421 fibril_mutex_lock(&devcon->comm_area_lock); 422 memcpy(devcon->comm_area, b->data, b->size); 423 rc = write_blocks(devcon, b->boff, 1); 424 fibril_mutex_unlock(&devcon->comm_area_lock); 425 if (rc != EOK) { 426 /* 427 * We did not manage to write the block 428 * to the device. Keep it around for 429 * another try. Hopefully, we will grab 430 * another block next time. 431 */ 432 fibril_mutex_unlock(&b->lock); 433 goto retry; 434 } 435 b->dirty = false; 436 if (!fibril_mutex_trylock(&cache->lock)) { 437 /* 438 * Somebody is probably racing with us. 439 * Unlock the block and retry. 440 */ 441 fibril_mutex_unlock(&b->lock); 442 goto retry; 443 } 444 445 } 446 fibril_mutex_unlock(&b->lock); 447 448 /* 449 * Unlink the block from the free list and the hash 450 * table. 451 */ 452 list_remove(&b->free_link); 384 453 temp_key = b->boff; 385 454 hash_table_remove(&cache->block_hash, &temp_key, 1); … … 388 457 block_initialize(b); 389 458 b->dev_handle = dev_handle; 390 b->size = cache-> block_size;459 b->size = cache->lblock_size; 391 460 b->boff = boff; 392 461 hash_table_insert(&cache->block_hash, &key, &b->hash_link); … … 394 463 /* 395 464 * Lock the block before releasing the cache lock. Thus we don't 396 * kill concur ent operations on the cache while doing I/O on the397 * block.465 * kill concurrent operations on the cache while doing I/O on 466 * the block. 
398 467 */ 399 468 fibril_mutex_lock(&b->lock); 400 469 fibril_mutex_unlock(&cache->lock); 401 470 402 if (sync) {403 /*404 * The block is dirty and needs to be written back to405 * the device before we can read in the new contents.406 */407 fibril_mutex_lock(&devcon->com_area_lock);408 memcpy(devcon->com_area, b->data, b->size);409 rc = write_block(devcon, oboff, cache->block_size);410 assert(rc == EOK);411 fibril_mutex_unlock(&devcon->com_area_lock);412 }413 471 if (!(flags & BLOCK_FLAGS_NOREAD)) { 414 472 /* … … 416 474 * the new contents from the device. 417 475 */ 418 fibril_mutex_lock(&devcon->com_area_lock); 419 rc = read_block(devcon, b->boff, cache->block_size); 420 assert(rc == EOK); 421 memcpy(b->data, devcon->com_area, cache->block_size); 422 fibril_mutex_unlock(&devcon->com_area_lock); 423 } 476 fibril_mutex_lock(&devcon->comm_area_lock); 477 rc = read_blocks(devcon, b->boff, 1); 478 memcpy(b->data, devcon->comm_area, cache->lblock_size); 479 fibril_mutex_unlock(&devcon->comm_area_lock); 480 if (rc != EOK) 481 b->toxic = true; 482 } else 483 rc = EOK; 424 484 425 485 fibril_mutex_unlock(&b->lock); 426 486 } 427 return b; 487 out: 488 if ((rc != EOK) && b) { 489 assert(b->toxic); 490 (void) block_put(b); 491 b = NULL; 492 } 493 *block = b; 494 return rc; 428 495 } 429 496 … … 433 500 * 434 501 * @param block Block of which a reference is to be released. 435 */ 436 void block_put(block_t *block) 502 * 503 * @return EOK on success or a negative error code. 504 */ 505 int block_put(block_t *block) 437 506 { 438 507 devcon_t *devcon = devcon_search(block->dev_handle); 439 508 cache_t *cache; 440 int rc; 509 unsigned blocks_cached; 510 enum cache_mode mode; 511 int rc = EOK; 441 512 442 513 assert(devcon); … … 444 515 445 516 cache = devcon->cache; 517 518 retry: 519 fibril_mutex_lock(&cache->lock); 520 blocks_cached = cache->blocks_cached; 521 mode = cache->mode; 522 fibril_mutex_unlock(&cache->lock); 523 524 /* 525 * Determine whether to sync the block. 
Syncing the block is best done 526 * when not holding the cache lock as it does not impede concurrency. 527 * Since the situation may have changed when we unlocked the cache, the 528 * blocks_cached and mode variables are mere hints. We will recheck the 529 * conditions later when the cache lock is held again. 530 */ 531 fibril_mutex_lock(&block->lock); 532 if (block->toxic) 533 block->dirty = false; /* will not write back toxic block */ 534 if (block->dirty && (block->refcnt == 1) && 535 (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) { 536 fibril_mutex_lock(&devcon->comm_area_lock); 537 memcpy(devcon->comm_area, block->data, block->size); 538 rc = write_blocks(devcon, block->boff, 1); 539 fibril_mutex_unlock(&devcon->comm_area_lock); 540 block->dirty = false; 541 } 542 fibril_mutex_unlock(&block->lock); 543 446 544 fibril_mutex_lock(&cache->lock); 447 545 fibril_mutex_lock(&block->lock); … … 449 547 /* 450 548 * Last reference to the block was dropped. Either free the 451 * block or put it on the free list. 549 * block or put it on the free list. In case of an I/O error, 550 * free the block. 452 551 */ 453 if (cache->blocks_cached > CACHE_HI_WATERMARK) { 552 if ((cache->blocks_cached > CACHE_HI_WATERMARK) || 553 (rc != EOK)) { 454 554 /* 455 * Currently there are too many cached blocks. 555 * Currently there are too many cached blocks or there 556 * was an I/O error when writing the block back to the 557 * device. 456 558 */ 457 559 if (block->dirty) { 458 fibril_mutex_lock(&devcon->com_area_lock); 459 memcpy(devcon->com_area, block->data, 460 block->size); 461 rc = write_block(devcon, block->boff, 462 block->size); 463 assert(rc == EOK); 464 fibril_mutex_unlock(&devcon->com_area_lock); 560 /* 561 * We cannot sync the block while holding the 562 * cache lock. Release everything and retry. 
563 */ 564 block->refcnt++; 565 fibril_mutex_unlock(&block->lock); 566 fibril_mutex_unlock(&cache->lock); 567 goto retry; 465 568 } 466 569 /* … … 473 576 cache->blocks_cached--; 474 577 fibril_mutex_unlock(&cache->lock); 475 return ;578 return rc; 476 579 } 477 580 /* 478 581 * Put the block on the free list. 479 582 */ 583 if (cache->mode != CACHE_MODE_WB && block->dirty) { 584 /* 585 * We cannot sync the block while holding the cache 586 * lock. Release everything and retry. 587 */ 588 block->refcnt++; 589 fibril_mutex_unlock(&block->lock); 590 fibril_mutex_unlock(&cache->lock); 591 goto retry; 592 } 480 593 list_append(&block->free_link, &cache->free_head); 481 if (cache->mode != CACHE_MODE_WB && block->dirty) {482 fibril_mutex_lock(&devcon->com_area_lock);483 memcpy(devcon->com_area, block->data, block->size);484 rc = write_block(devcon, block->boff, block->size);485 assert(rc == EOK);486 fibril_mutex_unlock(&devcon->com_area_lock);487 488 block->dirty = false;489 }490 594 } 491 595 fibril_mutex_unlock(&block->lock); 492 596 fibril_mutex_unlock(&cache->lock); 597 598 return rc; 493 599 } 494 600 … … 508 614 */ 509 615 int block_seqread(dev_handle_t dev_handle, off_t *bufpos, size_t *buflen, 510 off_t *pos, void *dst, size_t size , size_t block_size)616 off_t *pos, void *dst, size_t size) 511 617 { 512 618 off_t offset = 0; 513 619 size_t left = size; 514 devcon_t *devcon = devcon_search(dev_handle); 515 assert(devcon); 516 517 fibril_mutex_lock(&devcon->com_area_lock); 620 size_t block_size; 621 devcon_t *devcon; 622 623 devcon = devcon_search(dev_handle); 624 assert(devcon); 625 block_size = devcon->pblock_size; 626 627 fibril_mutex_lock(&devcon->comm_area_lock); 518 628 while (left > 0) { 519 629 size_t rd; … … 529 639 * destination buffer. 
530 640 */ 531 memcpy(dst + offset, devcon->com _area + *bufpos, rd);641 memcpy(dst + offset, devcon->comm_area + *bufpos, rd); 532 642 offset += rd; 533 643 *bufpos += rd; … … 540 650 int rc; 541 651 542 rc = read_block (devcon, *pos / block_size, block_size);652 rc = read_blocks(devcon, *pos / block_size, 1); 543 653 if (rc != EOK) { 544 fibril_mutex_unlock(&devcon->com _area_lock);654 fibril_mutex_unlock(&devcon->comm_area_lock); 545 655 return rc; 546 656 } … … 550 660 } 551 661 } 552 fibril_mutex_unlock(&devcon->com _area_lock);662 fibril_mutex_unlock(&devcon->comm_area_lock); 553 663 554 664 return EOK; 555 665 } 556 666 557 /** Read block from block device. 667 /** Read blocks directly from device (bypass cache). 668 * 669 * @param dev_handle Device handle of the block device. 670 * @param ba Address of first block. 671 * @param cnt Number of blocks. 672 * @param src Buffer for storing the data. 673 * 674 * @return EOK on success or negative error code on failure. 675 */ 676 int block_read_direct(dev_handle_t dev_handle, bn_t ba, size_t cnt, void *buf) 677 { 678 devcon_t *devcon; 679 int rc; 680 681 devcon = devcon_search(dev_handle); 682 assert(devcon); 683 684 fibril_mutex_lock(&devcon->comm_area_lock); 685 686 rc = read_blocks(devcon, ba, cnt); 687 if (rc == EOK) 688 memcpy(buf, devcon->comm_area, devcon->pblock_size * cnt); 689 690 fibril_mutex_unlock(&devcon->comm_area_lock); 691 692 return rc; 693 } 694 695 /** Write blocks directly to device (bypass cache). 696 * 697 * @param dev_handle Device handle of the block device. 698 * @param ba Address of first block. 699 * @param cnt Number of blocks. 700 * @param src The data to be written. 701 * 702 * @return EOK on success or negative error code on failure. 
703 */ 704 int block_write_direct(dev_handle_t dev_handle, bn_t ba, size_t cnt, 705 const void *data) 706 { 707 devcon_t *devcon; 708 int rc; 709 710 devcon = devcon_search(dev_handle); 711 assert(devcon); 712 713 fibril_mutex_lock(&devcon->comm_area_lock); 714 715 memcpy(devcon->comm_area, data, devcon->pblock_size * cnt); 716 rc = read_blocks(devcon, ba, cnt); 717 718 fibril_mutex_unlock(&devcon->comm_area_lock); 719 720 return rc; 721 } 722 723 /** Get device block size. 724 * 725 * @param dev_handle Device handle of the block device. 726 * @param bsize Output block size. 727 * 728 * @return EOK on success or negative error code on failure. 729 */ 730 int block_get_bsize(dev_handle_t dev_handle, size_t *bsize) 731 { 732 devcon_t *devcon; 733 734 devcon = devcon_search(dev_handle); 735 assert(devcon); 736 737 return get_block_size(devcon->dev_phone, bsize); 738 } 739 740 /** Read blocks from block device. 558 741 * 559 742 * @param devcon Device connection. 560 * @param b off Block index.561 * @param block_size Block size.743 * @param ba Address of first block. 744 * @param cnt Number of blocks. 562 745 * @param src Buffer for storing the data. 563 746 * 564 747 * @return EOK on success or negative error code on failure. 565 748 */ 566 static int read_block(devcon_t *devcon, bn_t boff, size_t block_size) 567 { 568 ipcarg_t retval; 749 static int read_blocks(devcon_t *devcon, bn_t ba, size_t cnt) 750 { 569 751 int rc; 570 752 571 753 assert(devcon); 572 rc = async_req_2_1(devcon->dev_phone, BD_READ_BLOCK, boff, block_size, 573 &retval); 574 if ((rc != EOK) || (retval != EOK)) 575 return (rc != EOK ? rc : (int) retval); 576 577 return EOK; 754 rc = async_req_3_0(devcon->dev_phone, BD_READ_BLOCKS, LOWER32(ba), 755 UPPER32(ba), cnt); 756 return rc; 578 757 } 579 758 … … 581 760 * 582 761 * @param devcon Device connection. 583 * @param b off Block index.584 * @param block_size Block size.762 * @param ba Address of first block. 763 * @param cnt Number of blocks. 
585 764 * @param src Buffer containing the data to write. 586 765 * 587 766 * @return EOK on success or negative error code on failure. 588 767 */ 589 static int write_block(devcon_t *devcon, bn_t boff, size_t block_size) 590 { 591 ipcarg_t retval; 768 static int write_blocks(devcon_t *devcon, bn_t ba, size_t cnt) 769 { 592 770 int rc; 593 771 594 772 assert(devcon); 595 rc = async_req_2_1(devcon->dev_phone, BD_WRITE_BLOCK, boff, block_size, 596 &retval); 597 if ((rc != EOK) || (retval != EOK)) 598 return (rc != EOK ? rc : (int) retval); 599 600 return EOK; 773 rc = async_req_3_0(devcon->dev_phone, BD_WRITE_BLOCKS, LOWER32(ba), 774 UPPER32(ba), cnt); 775 return rc; 776 } 777 778 /** Get block size used by the device. */ 779 static int get_block_size(int dev_phone, size_t *bsize) 780 { 781 ipcarg_t bs; 782 int rc; 783 784 rc = async_req_0_1(dev_phone, BD_GET_BLOCK_SIZE, &bs); 785 if (rc == EOK) 786 *bsize = (size_t) bs; 787 788 return rc; 601 789 } 602 790
Note: See TracChangeset for help on using the changeset viewer.