Changes in uspace/lib/libblock/libblock.c [d68e4d5:402a18f] in mainline
Files: 1 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
uspace/lib/libblock/libblock.c
--- d68e4d5
+++ 402a18f
 	b->refcnt = 1;
 	b->dirty = false;
+	b->toxic = false;
 	fibril_rwlock_initialize(&b->contents_lock);
 	link_initialize(&b->free_link);
…
 /** Instantiate a block in memory and get a reference to it.
  *
+ * @param block      Pointer to where the function will store the
+ *                   block pointer on success.
  * @param dev_handle Device handle of the block device.
  * @param boff       Block offset.
…
  *                   device.
  *
- * @return           Block structure.
- */
-block_t *block_get(dev_handle_t dev_handle, bn_t boff, int flags)
+ * @return           EOK on success or a negative error code.
+ */
+int block_get(block_t **block, dev_handle_t dev_handle, bn_t boff, int flags)
 {
 	devcon_t *devcon;
…
 	link_t *l;
 	unsigned long key = boff;
-	bn_t oboff;
+	int rc = EOK;
 
 	devcon = devcon_search(dev_handle);
…
 
 	cache = devcon->cache;
+
+retry:
 	fibril_mutex_lock(&cache->lock);
 	l = hash_table_find(&cache->block_hash, &key);
…
 		if (b->refcnt++ == 0)
 			list_remove(&b->free_link);
+		if (b->toxic)
+			rc = EIO;
 		fibril_mutex_unlock(&b->lock);
 		fibril_mutex_unlock(&cache->lock);
…
 		 * The block was not found in the cache.
 		 */
-		int rc;
-		bool sync = false;
-
 		if (cache_can_grow(cache)) {
 			/*
…
 			assert(!list_empty(&cache->free_head));
 			l = cache->free_head.next;
-			list_remove(l);
 			b = list_get_instance(l, block_t, free_link);
-			sync = b->dirty;
-			oboff = b->boff;
+
+			fibril_mutex_lock(&b->lock);
+			if (b->dirty) {
+				/*
+				 * The block needs to be written back to the
+				 * device before it changes identity. Do this
+				 * while not holding the cache lock so that
+				 * concurrency is not impeded. Also move the
+				 * block to the end of the free list so that we
+				 * do not slow down other instances of
+				 * block_get() draining the free list.
+				 */
+				list_remove(&b->free_link);
+				list_append(&b->free_link, &cache->free_head);
+				fibril_mutex_unlock(&cache->lock);
+				fibril_mutex_lock(&devcon->com_area_lock);
+				memcpy(devcon->com_area, b->data, b->size);
+				rc = write_block(devcon, b->boff,
+				    cache->block_size);
+				fibril_mutex_unlock(&devcon->com_area_lock);
+				if (rc != EOK) {
+					/*
+					 * We did not manage to write the block
+					 * to the device. Keep it around for
+					 * another try. Hopefully, we will grab
+					 * another block next time.
+					 */
+					fibril_mutex_unlock(&b->lock);
+					goto retry;
+				}
+				b->dirty = false;
+				if (!fibril_mutex_trylock(&cache->lock)) {
+					/*
+					 * Somebody is probably racing with us.
+					 * Unlock the block and retry.
+					 */
+					fibril_mutex_unlock(&b->lock);
+					goto retry;
+				}
+			}
+			fibril_mutex_unlock(&b->lock);
+
+			/*
+			 * Unlink the block from the free list and the hash
+			 * table.
+			 */
+			list_remove(&b->free_link);
 			temp_key = b->boff;
 			hash_table_remove(&cache->block_hash, &temp_key, 1);
…
 		/*
 		 * Lock the block before releasing the cache lock. Thus we don't
-		 * kill concur ent operations on the cache while doing I/O on the
-		 * block.
+		 * kill concurrent operations on the cache while doing I/O on
+		 * the block.
 		 */
 		fibril_mutex_lock(&b->lock);
 		fibril_mutex_unlock(&cache->lock);
 
-		if (sync) {
-			/*
-			 * The block is dirty and needs to be written back to
-			 * the device before we can read in the new contents.
-			 */
-			fibril_mutex_lock(&devcon->com_area_lock);
-			memcpy(devcon->com_area, b->data, b->size);
-			rc = write_block(devcon, oboff, cache->block_size);
-			assert(rc == EOK);
-			fibril_mutex_unlock(&devcon->com_area_lock);
-		}
 		if (!(flags & BLOCK_FLAGS_NOREAD)) {
 			/*
…
 			fibril_mutex_lock(&devcon->com_area_lock);
 			rc = read_block(devcon, b->boff, cache->block_size);
-			assert(rc == EOK);
 			memcpy(b->data, devcon->com_area, cache->block_size);
 			fibril_mutex_unlock(&devcon->com_area_lock);
-		}
+			if (rc != EOK)
+				b->toxic = true;
+		} else
+			rc = EOK;
 
 		fibril_mutex_unlock(&b->lock);
 	}
-	return b;
+	*block = b;
+	return rc;
 }
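The API change above replaces the returned block_t * with an integer error code and an output parameter. Note that the diff assigns *block on both the cache-hit and cache-miss paths, so the caller ends up holding a reference even when block_get() reports EIO for a toxic block. A minimal caller sketch under those assumptions; touch_block_zero() and the <libblock.h> header name are illustrative, not part of the changeset:

#include <libblock.h>	/* assumed location of the block_* declarations */
#include <errno.h>

/* Hypothetical helper: fetch block 0, use it, and release the reference. */
static int touch_block_zero(dev_handle_t dev_handle)
{
	block_t *b;
	int rc;

	/* Flags 0: BLOCK_FLAGS_NOREAD not set, so the contents are read in. */
	rc = block_get(&b, dev_handle, 0, 0);
	if (rc != EOK) {
		/* EIO means the cached copy is toxic; drop our reference. */
		(void) block_put(b);
		return rc;
	}

	/* ... read or modify b->data under b->contents_lock ... */

	/* block_put() may itself fail now, e.g. on a failed write-back. */
	return block_put(b);
}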
uspace/lib/libblock/libblock.c (continued)
…
  *
  * @param block      Block of which a reference is to be released.
- */
-void block_put(block_t *block)
+ *
+ * @return           EOK on success or a negative error code.
+ */
+int block_put(block_t *block)
 {
 	devcon_t *devcon = devcon_search(block->dev_handle);
 	cache_t *cache;
-	int rc;
+	unsigned blocks_cached;
+	enum cache_mode mode;
+	int rc = EOK;
 
 	assert(devcon);
…
 
 	cache = devcon->cache;
+
+retry:
+	fibril_mutex_lock(&cache->lock);
+	blocks_cached = cache->blocks_cached;
+	mode = cache->mode;
+	fibril_mutex_unlock(&cache->lock);
+
+	/*
+	 * Determine whether to sync the block. Syncing the block is best done
+	 * when not holding the cache lock as it does not impede concurrency.
+	 * Since the situation may have changed when we unlocked the cache, the
+	 * blocks_cached and mode variables are mere hints. We will recheck the
+	 * conditions later when the cache lock is held again.
+	 */
+	fibril_mutex_lock(&block->lock);
+	if (block->toxic)
+		block->dirty = false;	/* will not write back toxic block */
+	if (block->dirty && (block->refcnt == 1) &&
+	    (blocks_cached > CACHE_HI_WATERMARK || mode != CACHE_MODE_WB)) {
+		fibril_mutex_lock(&devcon->com_area_lock);
+		memcpy(devcon->com_area, block->data, block->size);
+		rc = write_block(devcon, block->boff, block->size);
+		fibril_mutex_unlock(&devcon->com_area_lock);
+		block->dirty = false;
+	}
+	fibril_mutex_unlock(&block->lock);
+
 	fibril_mutex_lock(&cache->lock);
 	fibril_mutex_lock(&block->lock);
…
 		/*
 		 * Last reference to the block was dropped. Either free the
-		 * block or put it on the free list.
+		 * block or put it on the free list. In case of an I/O error,
+		 * free the block.
 		 */
-		if (cache->blocks_cached > CACHE_HI_WATERMARK) {
-			/*
-			 * Currently there are too many cached blocks.
+		if ((cache->blocks_cached > CACHE_HI_WATERMARK) ||
+		    (rc != EOK)) {
+			/*
+			 * Currently there are too many cached blocks or there
+			 * was an I/O error when writing the block back to the
+			 * device.
 			 */
 			if (block->dirty) {
-				fibril_mutex_lock(&devcon->com_area_lock);
-				memcpy(devcon->com_area, block->data,
-				    block->size);
-				rc = write_block(devcon, block->boff,
-				    block->size);
-				assert(rc == EOK);
-				fibril_mutex_unlock(&devcon->com_area_lock);
+				/*
+				 * We cannot sync the block while holding the
+				 * cache lock. Release everything and retry.
+				 */
+				block->refcnt++;
+				fibril_mutex_unlock(&block->lock);
+				fibril_mutex_unlock(&cache->lock);
+				goto retry;
 			}
 			/*
…
 			cache->blocks_cached--;
 			fibril_mutex_unlock(&cache->lock);
-			return ;
+			return rc;
 		}
 		/*
 		 * Put the block on the free list.
 		 */
+		if (cache->mode != CACHE_MODE_WB && block->dirty) {
+			/*
+			 * We cannot sync the block while holding the cache
+			 * lock. Release everything and retry.
+			 */
+			block->refcnt++;
+			fibril_mutex_unlock(&block->lock);
+			fibril_mutex_unlock(&cache->lock);
+			goto retry;
+		}
 		list_append(&block->free_link, &cache->free_head);
-		if (cache->mode != CACHE_MODE_WB && block->dirty) {
-			fibril_mutex_lock(&devcon->com_area_lock);
-			memcpy(devcon->com_area, block->data, block->size);
-			rc = write_block(devcon, block->boff, block->size);
-			assert(rc == EOK);
-			fibril_mutex_unlock(&devcon->com_area_lock);
-
-			block->dirty = false;
-		}
 	}
 	fibril_mutex_unlock(&block->lock);
 	fibril_mutex_unlock(&cache->lock);
+
+	return rc;
 }
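Both rewritten paths follow the same discipline: block I/O is never issued while the cache lock is held. Instead, the code records hints, releases the locks, syncs the block, and jumps back to the retry label to re-validate, because another fibril may have changed the cache in the meantime. A self-contained sketch of that unlock-sync-retry pattern, using POSIX threads in place of fibrils and a stub slow_write() in place of write_block() (all names here are illustrative, nothing is libblock API):

/* cc -pthread retry_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t block_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dirty = true;

/* Stand-in for write_block(): slow I/O that must not hold cache_lock. */
static int slow_write(void)
{
	usleep(1000);
	return 0;	/* 0 plays the role of EOK */
}

static void put_block(void)
{
retry:
	pthread_mutex_lock(&cache_lock);
	pthread_mutex_lock(&block_lock);
	if (dirty) {
		/*
		 * Mirror the changeset: I/O must not happen under cache_lock,
		 * so release everything, sync, and start over from scratch.
		 */
		pthread_mutex_unlock(&block_lock);
		pthread_mutex_unlock(&cache_lock);

		pthread_mutex_lock(&block_lock);
		if (slow_write() == 0)
			dirty = false;
		pthread_mutex_unlock(&block_lock);

		goto retry;	/* conditions may have changed meanwhile */
	}
	/* Clean: safe to move the block to the free list here. */
	pthread_mutex_unlock(&block_lock);
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	put_block();
	printf("block released, dirty=%d\n", (int) dirty);
	return 0;
}

The extra bump of block->refcnt before each goto retry in block_put() serves the same purpose as re-checking dirty here: it keeps the block alive across the window in which no locks are held.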