Changes in uspace/lib/libblock/libblock.c [402a18f:d68e4d5] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
uspace/lib/libblock/libblock.c
r402a18f rd68e4d5 306 306 b->refcnt = 1; 307 307 b->dirty = false; 308 b->toxic = false;309 308 fibril_rwlock_initialize(&b->contents_lock); 310 309 link_initialize(&b->free_link); … … 314 313 /** Instantiate a block in memory and get a reference to it. 315 314 * 316 * @param block Pointer to where the function will store the317 * block pointer on success.318 315 * @param dev_handle Device handle of the block device. 319 316 * @param boff Block offset. … … 322 319 * device. 323 320 * 324 * @return EOK on success or a negative error code.325 */ 326 int block_get(block_t **block,dev_handle_t dev_handle, bn_t boff, int flags)321 * @return Block structure. 322 */ 323 block_t *block_get(dev_handle_t dev_handle, bn_t boff, int flags) 327 324 { 328 325 devcon_t *devcon; … … 331 328 link_t *l; 332 329 unsigned long key = boff; 333 int rc = EOK;330 bn_t oboff; 334 331 335 332 devcon = devcon_search(dev_handle); … … 339 336 340 337 cache = devcon->cache; 341 342 retry:343 338 fibril_mutex_lock(&cache->lock); 344 339 l = hash_table_find(&cache->block_hash, &key); … … 351 346 if (b->refcnt++ == 0) 352 347 list_remove(&b->free_link); 353 if (b->toxic)354 rc = EIO;355 348 fibril_mutex_unlock(&b->lock); 356 349 fibril_mutex_unlock(&cache->lock); … … 359 352 * The block was not found in the cache. 360 353 */ 354 int rc; 355 bool sync = false; 356 361 357 if (cache_can_grow(cache)) { 362 358 /* … … 382 378 assert(!list_empty(&cache->free_head)); 383 379 l = cache->free_head.next; 380 list_remove(l); 384 381 b = list_get_instance(l, block_t, free_link); 385 386 fibril_mutex_lock(&b->lock); 387 if (b->dirty) { 388 /* 389 * The block needs to be written back to the 390 * device before it changes identity. Do this 391 * while not holding the cache lock so that 392 * concurrency is not impeded. Also move the 393 * block to the end of the free list so that we 394 * do not slow down other instances of 395 * block_get() draining the free list. 
396 */ 397 list_remove(&b->free_link); 398 list_append(&b->free_link, &cache->free_head); 399 fibril_mutex_unlock(&cache->lock); 400 fibril_mutex_lock(&devcon->com_area_lock); 401 memcpy(devcon->com_area, b->data, b->size); 402 rc = write_block(devcon, b->boff, 403 cache->block_size); 404 fibril_mutex_unlock(&devcon->com_area_lock); 405 if (rc != EOK) { 406 /* 407 * We did not manage to write the block 408 * to the device. Keep it around for 409 * another try. Hopefully, we will grab 410 * another block next time. 411 */ 412 fibril_mutex_unlock(&b->lock); 413 goto retry; 414 } 415 b->dirty = false; 416 if (!fibril_mutex_trylock(&cache->lock)) { 417 /* 418 * Somebody is probably racing with us. 419 * Unlock the block and retry. 420 */ 421 fibril_mutex_unlock(&b->lock); 422 goto retry; 423 } 424 425 } 426 fibril_mutex_unlock(&b->lock); 427 428 /* 429 * Unlink the block from the free list and the hash 430 * table. 431 */ 432 list_remove(&b->free_link); 382 sync = b->dirty; 383 oboff = b->boff; 433 384 temp_key = b->boff; 434 385 hash_table_remove(&cache->block_hash, &temp_key, 1); … … 443 394 /* 444 395 * Lock the block before releasing the cache lock. Thus we don't 445 * kill concur rent operations on the cache while doing I/O on446 * theblock.396 * kill concurent operations on the cache while doing I/O on the 397 * block. 447 398 */ 448 399 fibril_mutex_lock(&b->lock); 449 400 fibril_mutex_unlock(&cache->lock); 450 401 402 if (sync) { 403 /* 404 * The block is dirty and needs to be written back to 405 * the device before we can read in the new contents. 
406 */ 407 fibril_mutex_lock(&devcon->com_area_lock); 408 memcpy(devcon->com_area, b->data, b->size); 409 rc = write_block(devcon, oboff, cache->block_size); 410 assert(rc == EOK); 411 fibril_mutex_unlock(&devcon->com_area_lock); 412 } 451 413 if (!(flags & BLOCK_FLAGS_NOREAD)) { 452 414 /* … … 456 418 fibril_mutex_lock(&devcon->com_area_lock); 457 419 rc = read_block(devcon, b->boff, cache->block_size); 420 assert(rc == EOK); 458 421 memcpy(b->data, devcon->com_area, cache->block_size); 459 422 fibril_mutex_unlock(&devcon->com_area_lock); 460 if (rc != EOK) 461 b->toxic = true; 462 } else 463 rc = EOK; 423 } 464 424 465 425 fibril_mutex_unlock(&b->lock); 466 426 } 467 *block = b; 468 return rc; 427 return b; 469 428 } 470 429 … … 474 433 * 475 434 * @param block Block of which a reference is to be released. 476 * 477 * @return EOK on success or a negative error code. 478 */ 479 int block_put(block_t *block) 435 */ 436 void block_put(block_t *block) 480 437 { 481 438 devcon_t *devcon = devcon_search(block->dev_handle); 482 439 cache_t *cache; 483 unsigned blocks_cached; 484 enum cache_mode mode; 485 int rc = EOK; 440 int rc; 486 441 487 442 assert(devcon); … … 489 444 490 445 cache = devcon->cache; 491 492 retry:493 fibril_mutex_lock(&cache->lock);494 blocks_cached = cache->blocks_cached;495 mode = cache->mode;496 fibril_mutex_unlock(&cache->lock);497 498 /*499 * Determine whether to sync the block. Syncing the block is best done500 * when not holding the cache lock as it does not impede concurrency.501 * Since the situation may have changed when we unlocked the cache, the502 * blocks_cached and mode variables are mere hints. 
We will recheck the503 * conditions later when the cache lock is held again.504 */505 fibril_mutex_lock(&block->lock);506 if (block->toxic)507 block->dirty = false; /* will not write back toxic block */508 if (block->dirty && (block->refcnt == 1) &&509 (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {510 fibril_mutex_lock(&devcon->com_area_lock);511 memcpy(devcon->com_area, block->data, block->size);512 rc = write_block(devcon, block->boff, block->size);513 fibril_mutex_unlock(&devcon->com_area_lock);514 block->dirty = false;515 }516 fibril_mutex_unlock(&block->lock);517 518 446 fibril_mutex_lock(&cache->lock); 519 447 fibril_mutex_lock(&block->lock); … … 521 449 /* 522 450 * Last reference to the block was dropped. Either free the 523 * block or put it on the free list. In case of an I/O error, 524 * free the block. 451 * block or put it on the free list. 525 452 */ 526 if ((cache->blocks_cached > CACHE_HI_WATERMARK) || 527 (rc != EOK)) { 453 if (cache->blocks_cached > CACHE_HI_WATERMARK) { 528 454 /* 529 * Currently there are too many cached blocks or there 530 * was an I/O error when writing the block back to the 531 * device. 455 * Currently there are too many cached blocks. 532 456 */ 533 457 if (block->dirty) { 534 /* 535 * We cannot sync the block while holding the 536 * cache lock. Release everything and retry. 537 */ 538 block->refcnt++; 539 fibril_mutex_unlock(&block->lock); 540 fibril_mutex_unlock(&cache->lock); 541 goto retry; 458 fibril_mutex_lock(&devcon->com_area_lock); 459 memcpy(devcon->com_area, block->data, 460 block->size); 461 rc = write_block(devcon, block->boff, 462 block->size); 463 assert(rc == EOK); 464 fibril_mutex_unlock(&devcon->com_area_lock); 542 465 } 543 466 /* … … 550 473 cache->blocks_cached--; 551 474 fibril_mutex_unlock(&cache->lock); 552 return rc;475 return; 553 476 } 554 477 /* 555 478 * Put the block on the free list. 
556 479 */ 480 list_append(&block->free_link, &cache->free_head); 557 481 if (cache->mode != CACHE_MODE_WB && block->dirty) { 558 /* 559 * We cannot sync the block while holding the cache 560 * lock. Release everything and retry. 561 */ 562 block->refcnt++; 563 fibril_mutex_unlock(&block->lock); 564 fibril_mutex_unlock(&cache->lock); 565 goto retry; 566 } 567 list_append(&block->free_link, &cache->free_head); 482 fibril_mutex_lock(&devcon->com_area_lock); 483 memcpy(devcon->com_area, block->data, block->size); 484 rc = write_block(devcon, block->boff, block->size); 485 assert(rc == EOK); 486 fibril_mutex_unlock(&devcon->com_area_lock); 487 488 block->dirty = false; 489 } 568 490 } 569 491 fibril_mutex_unlock(&block->lock); 570 492 fibril_mutex_unlock(&cache->lock); 571 572 return rc;573 493 } 574 494
Note: See TracChangeset for help on using the changeset viewer.