Changes in uspace/srv/fs/fat/fat_ops.c [b69e4c0:c7bbf029] in mainline
File:
- 1 edited
Legend:
- Unmodified lines have no prefix
- Removed lines (present only in rb69e4c0) are prefixed with -
- Added lines (present only in rc7bbf029) are prefixed with +
- Lines containing only … mark omitted unchanged context
uspace/srv/fs/fat/fat_ops.c
 /*
  * Copyright (c) 2008 Jakub Jermar
- * Copyright (c) 2011 Oleg Romanenko
  * All rights reserved.
  *
…
 /** @addtogroup fs
  * @{
  */

 /**
…
 #include "fat_dentry.h"
 #include "fat_fat.h"
-#include "fat_directory.h"
 #include "../../vfs/vfs.h"
 #include <libfs.h>
 #include <libblock.h>
 #include <ipc/services.h>
-#include <ipc/loc.h>
+#include <ipc/devmap.h>
 #include <macros.h>
 #include <async.h>
…
 #include <align.h>
 #include <malloc.h>
-#include <str.h>

 #define FAT_NODE(node) ((node) ? (fat_node_t *) (node)->data : NULL)
…

 /** List of cached free FAT nodes. */
-static LIST_INITIALIZE(ffn_list);
+static LIST_INITIALIZE(ffn_head);

 /*
  * Forward declarations of FAT libfs operations.
  */
-static int fat_root_get(fs_node_t **, service_id_t);
+static int fat_root_get(fs_node_t **, devmap_handle_t);
 static int fat_match(fs_node_t **, fs_node_t *, const char *);
-static int fat_node_get(fs_node_t **, service_id_t, fs_index_t);
+static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
 static int fat_node_open(fs_node_t *);
 static int fat_node_put(fs_node_t *);
-static int fat_create_node(fs_node_t **, service_id_t, int);
+static int fat_create_node(fs_node_t **, devmap_handle_t, int);
 static int fat_destroy_node(fs_node_t *);
 static int fat_link(fs_node_t *, fs_node_t *, const char *);
…
 static aoff64_t fat_size_get(fs_node_t *);
 static unsigned fat_lnkcnt_get(fs_node_t *);
+static char fat_plb_get_char(unsigned);
 static bool fat_is_directory(fs_node_t *);
 static bool fat_is_file(fs_node_t *node);
-static service_id_t fat_service_get(fs_node_t *node);
+static devmap_handle_t fat_device_get(fs_node_t *node);

 /*
…
     node->dirty = false;
     node->lastc_cached_valid = false;
-    node->lastc_cached_value = 0;
+    node->lastc_cached_value = FAT_CLST_LAST1;
     node->currc_cached_valid = false;
     node->currc_cached_bn = 0;
-    node->currc_cached_value = 0;
+    node->currc_cached_value = FAT_CLST_LAST1;
 }
…
     fat_dentry_t *d;
     int rc;

     assert(node->dirty);

-    bs = block_bb_get(node->idx->service_id);
+    bs = block_bb_get(node->idx->devmap_handle);

     /* Read the block that contains the dentry of interest. */
-    rc = _fat_block_get(&b, bs, node->idx->service_id, node->idx->pfc,
+    rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
         NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
         BLOCK_FLAGS_NONE);
…
         d->attr = FAT_ATTR_SUBDIR;
     }

     /* TODO: update other fields? (e.g time fields) */

     b->dirty = true;    /* need to sync block */
     rc = block_put(b);
…
 }

-static int fat_node_fini_by_service_id(service_id_t service_id)
-{
+static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
+{
+    link_t *lnk;
     fat_node_t *nodep;
     int rc;
…
 restart:
     fibril_mutex_lock(&ffn_mutex);
-    list_foreach(ffn_list, lnk) {
+    for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) {
         nodep = list_get_instance(lnk, fat_node_t, ffn_link);
         if (!fibril_mutex_trylock(&nodep->lock)) {
…
             goto restart;
         }
-        if (nodep->idx->service_id != service_id) {
+        if (nodep->idx->devmap_handle != devmap_handle) {
             fibril_mutex_unlock(&nodep->idx->lock);
             fibril_mutex_unlock(&nodep->lock);
…
         free(nodep);

-        /* Need to restart because we changed ffn_list. */
+        /* Need to restart because we changed the ffn_head list. */
         goto restart;
     }
…

     fibril_mutex_lock(&ffn_mutex);
-    if (!list_empty(&ffn_list)) {
+    if (!list_empty(&ffn_head)) {
         /* Try to use a cached free node structure. */
         fat_idx_t *idxp_tmp;
-        nodep = list_get_instance(list_first(&ffn_list), fat_node_t,
-            ffn_link);
+        nodep = list_get_instance(ffn_head.next, fat_node_t, ffn_link);
         if (!fibril_mutex_trylock(&nodep->lock))
             goto skip_cache;
…
     fn->data = nodep;
     nodep->bp = fn;

     *nodepp = nodep;
     return EOK;
…
      * We must instantiate the node from the file system.
      */

     assert(idxp->pfc);

…
         return rc;

-    bs = block_bb_get(idxp->service_id);
+    bs = block_bb_get(idxp->devmap_handle);

     /* Read the block that contains the dentry of interest. */
-    rc = _fat_block_get(&b, bs, idxp->service_id, idxp->pfc, NULL,
+    rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
         (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
     if (rc != EOK) {
…

     d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
-    if (FAT_IS_FAT32(bs)) {
-        nodep->firstc = uint16_t_le2host(d->firstc_lo) |
-            (uint16_t_le2host(d->firstc_hi) << 16);
-    } else
-        nodep->firstc = uint16_t_le2host(d->firstc);
-
     if (d->attr & FAT_ATTR_SUBDIR) {
         /*
          * The only directory which does not have this bit set is the
          * root directory itself. The root directory node is handled
…
          */
         nodep->type = FAT_DIRECTORY;
-
         /*
          * Unfortunately, the 'size' field of the FAT dentry is not
          * defined for the directory entry type. We must determine the
          * size of the directory by walking the FAT.
          */
-        uint32_t clusters;
-        rc = fat_clusters_get(&clusters, bs, idxp->service_id,
-            nodep->firstc);
+        uint16_t clusters;
+        rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
+            uint16_t_le2host(d->firstc));
         if (rc != EOK) {
…
         nodep->size = uint32_t_le2host(d->size);
     }
+    nodep->firstc = uint16_t_le2host(d->firstc);
     nodep->lnkcnt = 1;
     nodep->refcnt = 1;
…
  */

-int fat_root_get(fs_node_t **rfn, service_id_t service_id)
-{
-    return fat_node_get(rfn, service_id, 0);
+int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
+{
+    return fat_node_get(rfn, devmap_handle, 0);
 }

 int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)
 {
+    fat_bs_t *bs;
     fat_node_t *parentp = FAT_NODE(pfn);
-    char name[FAT_LFN_NAME_SIZE];
+    char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
+    unsigned i, j;
+    unsigned blocks;
     fat_dentry_t *d;
-    service_id_t service_id;
+    devmap_handle_t devmap_handle;
+    block_t *b;
     int rc;

     fibril_mutex_lock(&parentp->idx->lock);
-    service_id = parentp->idx->service_id;
+    devmap_handle = parentp->idx->devmap_handle;
     fibril_mutex_unlock(&parentp->idx->lock);

-    fat_directory_t di;
-    rc = fat_directory_open(parentp, &di);
-    if (rc != EOK)
-        return rc;
-
-    while (fat_directory_read(&di, name, &d) == EOK) {
-        if (fat_dentry_namecmp(name, component) == 0) {
-            /* hit */
-            fat_node_t *nodep;
-            aoff64_t o = di.pos %
-                (BPS(di.bs) / sizeof(fat_dentry_t));
-            fat_idx_t *idx = fat_idx_get_by_pos(service_id,
-                parentp->firstc, di.bnum * DPS(di.bs) + o);
-            if (!idx) {
-                /*
-                 * Can happen if memory is low or if we
-                 * run out of 32-bit indices.
-                 */
-                rc = fat_directory_close(&di);
-                return (rc == EOK) ? ENOMEM : rc;
-            }
-            rc = fat_node_get_core(&nodep, idx);
-            fibril_mutex_unlock(&idx->lock);
-            if (rc != EOK) {
-                (void) fat_directory_close(&di);
-                return rc;
-            }
-            *rfn = FS_NODE(nodep);
-            rc = fat_directory_close(&di);
-            if (rc != EOK)
-                (void) fat_node_put(*rfn);
-            return rc;
-        } else {
-            rc = fat_directory_next(&di);
-            if (rc != EOK)
-                break;
-        }
-    }
-    (void) fat_directory_close(&di);
+    bs = block_bb_get(devmap_handle);
+    blocks = parentp->size / BPS(bs);
+    for (i = 0; i < blocks; i++) {
+        rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
+        if (rc != EOK)
+            return rc;
+        for (j = 0; j < DPS(bs); j++) {
+            d = ((fat_dentry_t *)b->data) + j;
+            switch (fat_classify_dentry(d)) {
+            case FAT_DENTRY_SKIP:
+            case FAT_DENTRY_FREE:
+                continue;
+            case FAT_DENTRY_LAST:
+                /* miss */
+                rc = block_put(b);
+                *rfn = NULL;
+                return rc;
+            default:
+            case FAT_DENTRY_VALID:
+                fat_dentry_name_get(d, name);
+                break;
+            }
+            if (fat_dentry_namecmp(name, component) == 0) {
+                /* hit */
+                fat_node_t *nodep;
+                fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
+                    parentp->firstc, i * DPS(bs) + j);
+                if (!idx) {
+                    /*
+                     * Can happen if memory is low or if we
+                     * run out of 32-bit indices.
+                     */
+                    rc = block_put(b);
+                    return (rc == EOK) ? ENOMEM : rc;
+                }
+                rc = fat_node_get_core(&nodep, idx);
+                fibril_mutex_unlock(&idx->lock);
+                if (rc != EOK) {
+                    (void) block_put(b);
+                    return rc;
+                }
+                *rfn = FS_NODE(nodep);
+                rc = block_put(b);
+                if (rc != EOK)
+                    (void) fat_node_put(*rfn);
+                return rc;
+            }
+        }
+        rc = block_put(b);
+        if (rc != EOK)
+            return rc;
+    }
+
     *rfn = NULL;
     return EOK;
…

 /** Instantiate a FAT in-core node. */
-int fat_node_get(fs_node_t **rfn, service_id_t service_id, fs_index_t index)
+int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
 {
     fat_node_t *nodep;
…
     int rc;

-    idxp = fat_idx_get_by_index(service_id, index);
+    idxp = fat_idx_get_by_index(devmap_handle, index);
     if (!idxp) {
         *rfn = NULL;
…
     if (nodep->idx) {
         fibril_mutex_lock(&ffn_mutex);
-        list_append(&nodep->ffn_link, &ffn_list);
+        list_append(&nodep->ffn_link, &ffn_head);
         fibril_mutex_unlock(&ffn_mutex);
     } else {
…
 }

-int fat_create_node(fs_node_t **rfn, service_id_t service_id, int flags)
+int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
 {
     fat_idx_t *idxp;
…
     int rc;

-    bs = block_bb_get(service_id);
+    bs = block_bb_get(devmap_handle);
     if (flags & L_DIRECTORY) {
         /* allocate a cluster */
-        rc = fat_alloc_clusters(bs, service_id, 1, &mcl, &lcl);
+        rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
         if (rc != EOK)
             return rc;
         /* populate the new cluster with unused dentries */
-        rc = fat_zero_cluster(bs, service_id, mcl);
+        rc = fat_zero_cluster(bs, devmap_handle, mcl);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, service_id, mcl);
+            (void) fat_free_clusters(bs, devmap_handle, mcl);
             return rc;
         }
…
     rc = fat_node_get_new(&nodep);
     if (rc != EOK) {
-        (void) fat_free_clusters(bs, service_id, mcl);
+        (void) fat_free_clusters(bs, devmap_handle, mcl);
         return rc;
     }
-    rc = fat_idx_get_new(&idxp, service_id);
-    if (rc != EOK) {
-        (void) fat_free_clusters(bs, service_id, mcl);
+    rc = fat_idx_get_new(&idxp, devmap_handle);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, devmap_handle, mcl);
         (void) fat_node_put(FS_NODE(nodep));
         return rc;
…
     assert(!has_children);

-    bs = block_bb_get(nodep->idx->service_id);
+    bs = block_bb_get(nodep->idx->devmap_handle);
     if (nodep->firstc != FAT_CLST_RES0) {
         assert(nodep->size);
         /* Free all clusters allocated to the node. */
-        rc = fat_free_clusters(bs, nodep->idx->service_id,
+        rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
             nodep->firstc);
     }
…
     fat_bs_t *bs;
     block_t *b;
-    fat_directory_t di;
-    fat_dentry_t de;
+    unsigned i, j;
+    unsigned blocks;
+    fat_cluster_t mcl, lcl;
     int rc;

…
     fibril_mutex_unlock(&childp->lock);

-    if (!fat_valid_name(name))
-        return ENOTSUP;
-
-    fibril_mutex_lock(&parentp->idx->lock);
-    bs = block_bb_get(parentp->idx->service_id);
-    rc = fat_directory_open(parentp, &di);
-    if (rc != EOK) {
-        fibril_mutex_unlock(&parentp->idx->lock);
-        return rc;
-    }
+    if (!fat_dentry_name_verify(name)) {
+        /*
+         * Attempt to create unsupported name.
+         */
+        return ENOTSUP;
+    }
+
+    /*
+     * Get us an unused parent node's dentry or grow the parent and allocate
+     * a new one.
+     */
+
+    fibril_mutex_lock(&parentp->idx->lock);
+    bs = block_bb_get(parentp->idx->devmap_handle);
+
+    blocks = parentp->size / BPS(bs);
+
+    for (i = 0; i < blocks; i++) {
+        rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
+        if (rc != EOK) {
+            fibril_mutex_unlock(&parentp->idx->lock);
+            return rc;
+        }
+        for (j = 0; j < DPS(bs); j++) {
+            d = ((fat_dentry_t *)b->data) + j;
+            switch (fat_classify_dentry(d)) {
+            case FAT_DENTRY_SKIP:
+            case FAT_DENTRY_VALID:
+                /* skipping used and meta entries */
+                continue;
+            case FAT_DENTRY_FREE:
+            case FAT_DENTRY_LAST:
+                /* found an empty slot */
+                goto hit;
+            }
+        }
+        rc = block_put(b);
+        if (rc != EOK) {
+            fibril_mutex_unlock(&parentp->idx->lock);
+            return rc;
+        }
+    }
+    j = 0;
+
+    /*
+     * We need to grow the parent in order to create a new unused dentry.
+     */
+    if (parentp->firstc == FAT_CLST_ROOT) {
+        /* Can't grow the root directory. */
+        fibril_mutex_unlock(&parentp->idx->lock);
+        return ENOSPC;
+    }
+    rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
+    if (rc != EOK) {
+        fibril_mutex_unlock(&parentp->idx->lock);
+        return rc;
+    }
+    rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
+        fibril_mutex_unlock(&parentp->idx->lock);
+        return rc;
+    }
+    rc = fat_append_clusters(bs, parentp, mcl, lcl);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
+        fibril_mutex_unlock(&parentp->idx->lock);
+        return rc;
+    }
+    parentp->size += BPS(bs) * SPC(bs);
+    parentp->dirty = true;    /* need to sync node */
+    rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
+    if (rc != EOK) {
+        fibril_mutex_unlock(&parentp->idx->lock);
+        return rc;
+    }
+    d = (fat_dentry_t *)b->data;
+
+hit:
     /*
      * At this point we only establish the link between the parent and the
…
      * dentry data is kept in the child node structure.
      */
-    memset(&de, 0, sizeof(fat_dentry_t));
-
-    rc = fat_directory_write(&di, name, &de);
-    if (rc != EOK) {
-        (void) fat_directory_close(&di);
-        fibril_mutex_unlock(&parentp->idx->lock);
-        return rc;
-    }
-    rc = fat_directory_close(&di);
-    if (rc != EOK) {
-        fibril_mutex_unlock(&parentp->idx->lock);
-        return rc;
-    }
-
-    fibril_mutex_unlock(&parentp->idx->lock);
+    memset(d, 0, sizeof(fat_dentry_t));
+    fat_dentry_name_set(d, name);
+    b->dirty = true;    /* need to sync block */
+    rc = block_put(b);
+    fibril_mutex_unlock(&parentp->idx->lock);
+    if (rc != EOK)
+        return rc;

     fibril_mutex_lock(&childp->idx->lock);

     if (childp->type == FAT_DIRECTORY) {
         /*
…
         d = (fat_dentry_t *) b->data;
         if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
-            (bcmp(d->name, FAT_NAME_DOT, FAT_NAME_LEN)) == 0) {
+            (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) {
             memset(d, 0, sizeof(fat_dentry_t));
             memcpy(d->name, FAT_NAME_DOT, FAT_NAME_LEN);
…
         d++;
         if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) ||
-            (bcmp(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN) == 0)) {
+            (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) {
             memset(d, 0, sizeof(fat_dentry_t));
             memcpy(d->name, FAT_NAME_DOT_DOT, FAT_NAME_LEN);
             memcpy(d->ext, FAT_EXT_PAD, FAT_EXT_LEN);
             d->attr = FAT_ATTR_SUBDIR;
-            d->firstc = (parentp->firstc == FAT_ROOT_CLST(bs)) ?
-                host2uint16_t_le(FAT_CLST_ROOTPAR) :
+            d->firstc = (parentp->firstc == FAT_CLST_ROOT) ?
+                host2uint16_t_le(FAT_CLST_RES0) :
                 host2uint16_t_le(parentp->firstc);
             /* TODO: initialize also the date/time members. */
…

     childp->idx->pfc = parentp->firstc;
-    childp->idx->pdi = di.pos;    /* di.pos holds absolute position of SFN entry */
+    childp->idx->pdi = i * DPS(bs) + j;
     fibril_mutex_unlock(&childp->idx->lock);

…
     fat_node_t *parentp = FAT_NODE(pfn);
     fat_node_t *childp = FAT_NODE(cfn);
+    fat_bs_t *bs;
+    fat_dentry_t *d;
+    block_t *b;
     bool has_children;
     int rc;
…
     if (!parentp)
         return EBUSY;

     rc = fat_has_children(&has_children, cfn);
     if (rc != EOK)
…
     assert(childp->lnkcnt == 1);
     fibril_mutex_lock(&childp->idx->lock);
-
-    fat_directory_t di;
-    rc = fat_directory_open(parentp, &di);
-    if (rc != EOK)
-        goto error;
-    rc = fat_directory_seek(&di, childp->idx->pdi);
-    if (rc != EOK)
-        goto error;
-    rc = fat_directory_erase(&di);
-    if (rc != EOK)
-        goto error;
-    rc = fat_directory_close(&di);
+    bs = block_bb_get(childp->idx->devmap_handle);
+
+    rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
+        NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
+        BLOCK_FLAGS_NONE);
+    if (rc != EOK)
+        goto error;
+    d = (fat_dentry_t *)b->data +
+        (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
+    /* mark the dentry as not-currently-used */
+    d->name[0] = FAT_DENTRY_ERASED;
+    b->dirty = true;    /* need to sync block */
+    rc = block_put(b);
     if (rc != EOK)
         goto error;
…

 error:
-    (void) fat_directory_close(&di);
+    fibril_mutex_unlock(&parentp->idx->lock);
+    fibril_mutex_unlock(&childp->lock);
     fibril_mutex_unlock(&childp->idx->lock);
-    fibril_mutex_unlock(&childp->lock);
-    fibril_mutex_unlock(&parentp->lock);
     return rc;
 }
…
         return EOK;
     }

     fibril_mutex_lock(&nodep->idx->lock);
-    bs = block_bb_get(nodep->idx->service_id);
+    bs = block_bb_get(nodep->idx->devmap_handle);

     blocks = nodep->size / BPS(bs);
…
     for (i = 0; i < blocks; i++) {
         fat_dentry_t *d;

         rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE);
         if (rc != EOK) {
…
         if (rc != EOK) {
             fibril_mutex_unlock(&nodep->idx->lock);
             return rc;
         }
     }
…
 }

+char fat_plb_get_char(unsigned pos)
+{
+    return fat_reg.plb_ro[pos % PLB_SIZE];
+}
+
 bool fat_is_directory(fs_node_t *fn)
 {
…
 }

-service_id_t fat_service_get(fs_node_t *node)
+devmap_handle_t fat_device_get(fs_node_t *node)
 {
     return 0;
…
     .size_get = fat_size_get,
     .lnkcnt_get = fat_lnkcnt_get,
+    .plb_get_char = fat_plb_get_char,
     .is_directory = fat_is_directory,
     .is_file = fat_is_file,
-    .service_get = fat_service_get
+    .device_get = fat_device_get
 };

 /*
- * FAT VFS_OUT operations.
+ * VFS operations.
  */

-static int
-fat_mounted(service_id_t service_id, const char *opts, fs_index_t *index,
-    aoff64_t *size, unsigned *linkcnt)
-{
-    enum cache_mode cmode = CACHE_MODE_WB;
+void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    enum cache_mode cmode;
     fat_bs_t *bs;
-    fat_instance_t *instance;
-    int rc;
-
-    instance = malloc(sizeof(fat_instance_t));
-    if (!instance)
-        return ENOMEM;
-    instance->lfn_enabled = true;
-
-    /* Parse mount options. */
-    char *mntopts = (char *) opts;
-    char *saveptr;
-    char *opt;
-    while ((opt = strtok_r(mntopts, " ,", &saveptr)) != NULL) {
-        if (str_cmp(opt, "wtcache") == 0)
-            cmode = CACHE_MODE_WT;
-        else if (str_cmp(opt, "nolfn") == 0)
-            instance->lfn_enabled = false;
-        mntopts = NULL;
-    }
+
+    /* Accept the mount options */
+    char *opts;
+    int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL);
+
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+
+    /* Check for option enabling write through. */
+    if (str_cmp(opts, "wtcache") == 0)
+        cmode = CACHE_MODE_WT;
+    else
+        cmode = CACHE_MODE_WB;
+
+    free(opts);

     /* initialize libblock */
-    rc = block_init(EXCHANGE_SERIALIZE, service_id, BS_SIZE);
-    if (rc != EOK) {
-        free(instance);
-        return rc;
+    rc = block_init(devmap_handle, BS_SIZE);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
     }

     /* prepare the boot block */
-    rc = block_bb_read(service_id, BS_BLOCK);
-    if (rc != EOK) {
-        free(instance);
-        block_fini(service_id);
-        return rc;
+    rc = block_bb_read(devmap_handle, BS_BLOCK);
+    if (rc != EOK) {
+        block_fini(devmap_handle);
+        async_answer_0(rid, rc);
+        return;
     }

     /* get the buffer with the boot sector */
-    bs = block_bb_get(service_id);
+    bs = block_bb_get(devmap_handle);

     if (BPS(bs) != BS_SIZE) {
-        free(instance);
-        block_fini(service_id);
-        return ENOTSUP;
+        block_fini(devmap_handle);
+        async_answer_0(rid, ENOTSUP);
+        return;
     }

     /* Initialize the block cache */
-    rc = block_cache_init(service_id, BPS(bs), 0 /* XXX */, cmode);
-    if (rc != EOK) {
-        free(instance);
-        block_fini(service_id);
-        return rc;
+    rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
+    if (rc != EOK) {
+        block_fini(devmap_handle);
+        async_answer_0(rid, rc);
+        return;
     }

     /* Do some simple sanity checks on the file system. */
-    rc = fat_sanity_check(bs, service_id);
-    if (rc != EOK) {
-        free(instance);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        return rc;
-    }
-
-    rc = fat_idx_init_by_service_id(service_id);
-    if (rc != EOK) {
-        free(instance);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        return rc;
+    rc = fat_sanity_check(bs, devmap_handle);
+    if (rc != EOK) {
+        (void) block_cache_fini(devmap_handle);
+        block_fini(devmap_handle);
+        async_answer_0(rid, rc);
+        return;
+    }
+
+    rc = fat_idx_init_by_devmap_handle(devmap_handle);
+    if (rc != EOK) {
+        (void) block_cache_fini(devmap_handle);
+        block_fini(devmap_handle);
+        async_answer_0(rid, rc);
+        return;
     }

…
     fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
     if (!rfn) {
-        free(instance);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        fat_idx_fini_by_service_id(service_id);
-        return ENOMEM;
-    }
-
+        (void) block_cache_fini(devmap_handle);
+        block_fini(devmap_handle);
+        fat_idx_fini_by_devmap_handle(devmap_handle);
+        async_answer_0(rid, ENOMEM);
+        return;
+    }
     fs_node_initialize(rfn);
     fat_node_t *rootp = (fat_node_t *)malloc(sizeof(fat_node_t));
     if (!rootp) {
-        free(instance);
         free(rfn);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        fat_idx_fini_by_service_id(service_id);
-        return ENOMEM;
+        (void) block_cache_fini(devmap_handle);
+        block_fini(devmap_handle);
+        fat_idx_fini_by_devmap_handle(devmap_handle);
+        async_answer_0(rid, ENOMEM);
+        return;
     }
     fat_node_initialize(rootp);

-    fat_idx_t *ridxp = fat_idx_get_by_pos(service_id, FAT_CLST_ROOTPAR, 0);
+    fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
     if (!ridxp) {
-        free(instance);
         free(rfn);
         free(rootp);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        fat_idx_fini_by_service_id(service_id);
-        return ENOMEM;
+        (void) block_cache_fini(devmap_handle);
+        block_fini(devmap_handle);
+        fat_idx_fini_by_devmap_handle(devmap_handle);
+        async_answer_0(rid, ENOMEM);
+        return;
     }
     assert(ridxp->index == 0);
…

     rootp->type = FAT_DIRECTORY;
-    rootp->firstc = FAT_ROOT_CLST(bs);
+    rootp->firstc = FAT_CLST_ROOT;
     rootp->refcnt = 1;
     rootp->lnkcnt = 0;    /* FS root is not linked */
-
-    if (FAT_IS_FAT32(bs)) {
-        uint32_t clusters;
-        rc = fat_clusters_get(&clusters, bs, service_id, rootp->firstc);
-        if (rc != EOK) {
-            fibril_mutex_unlock(&ridxp->lock);
-            free(instance);
-            free(rfn);
-            free(rootp);
-            (void) block_cache_fini(service_id);
-            block_fini(service_id);
-            fat_idx_fini_by_service_id(service_id);
-            return ENOTSUP;
-        }
-        rootp->size = BPS(bs) * SPC(bs) * clusters;
-    } else
-        rootp->size = RDE(bs) * sizeof(fat_dentry_t);
-
-    rc = fs_instance_create(service_id, instance);
-    if (rc != EOK) {
-        fibril_mutex_unlock(&ridxp->lock);
-        free(instance);
-        free(rfn);
-        free(rootp);
-        (void) block_cache_fini(service_id);
-        block_fini(service_id);
-        fat_idx_fini_by_service_id(service_id);
-        return rc;
-    }
-
+    rootp->size = RDE(bs) * sizeof(fat_dentry_t);
     rootp->idx = ridxp;
     ridxp->nodep = rootp;
     rootp->bp = rfn;
     rfn->data = rootp;

     fibril_mutex_unlock(&ridxp->lock);

-    *index = ridxp->index;
-    *size = rootp->size;
-    *linkcnt = rootp->lnkcnt;
-
-    return EOK;
-}
-
-static int fat_update_fat32_fsinfo(service_id_t service_id)
-{
-    fat_bs_t *bs;
-    fat32_fsinfo_t *info;
-    block_t *b;
-    int rc;
-
-    bs = block_bb_get(service_id);
-    assert(FAT_IS_FAT32(bs));
-
-    rc = block_get(&b, service_id, uint16_t_le2host(bs->fat32.fsinfo_sec),
-        BLOCK_FLAGS_NONE);
-    if (rc != EOK)
-        return rc;
-
-    info = (fat32_fsinfo_t *) b->data;
-
-    if (bcmp(info->sig1, FAT32_FSINFO_SIG1, sizeof(info->sig1)) ||
-        bcmp(info->sig2, FAT32_FSINFO_SIG2, sizeof(info->sig2)) ||
-        bcmp(info->sig3, FAT32_FSINFO_SIG3, sizeof(info->sig3))) {
-        (void) block_put(b);
-        return EINVAL;
-    }
-
-    /* For now, invalidate the counter. */
-    info->free_clusters = host2uint16_t_le(-1);
-
-    b->dirty = true;
-    return block_put(b);
-}
-
-static int fat_unmounted(service_id_t service_id)
-{
+    async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);
+}
+
+void fat_mount(ipc_callid_t rid, ipc_call_t *request)
+{
+    libfs_mount(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
+}
+
+void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
     fs_node_t *fn;
     fat_node_t *nodep;
-    fat_bs_t *bs;
     int rc;

-    bs = block_bb_get(service_id);
-
-    rc = fat_root_get(&fn, service_id);
-    if (rc != EOK)
-        return rc;
+    rc = fat_root_get(&fn, devmap_handle);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
     nodep = FAT_NODE(fn);

…
     if (nodep->refcnt != 2) {
         (void) fat_node_put(fn);
-        return EBUSY;
-    }
-
-    if (FAT_IS_FAT32(bs)) {
-        /*
-         * Attempt to update the FAT32 FS info.
-         */
-        (void) fat_update_fat32_fsinfo(service_id);
-    }
-
+        async_answer_0(rid, EBUSY);
+        return;
+    }
+
     /*
      * Put the root node and force it to the FAT free node list.
…
      * stop using libblock for this instance.
      */
-    (void) fat_node_fini_by_service_id(service_id);
-    fat_idx_fini_by_service_id(service_id);
-    (void) block_cache_fini(service_id);
-    block_fini(service_id);
-
-    void *data;
-    if (fs_instance_get(service_id, &data) == EOK) {
-        fs_instance_destroy(service_id);
-        free(data);
-    }
-
-    return EOK;
-}
-
-static int
-fat_read(service_id_t service_id, fs_index_t index, aoff64_t pos,
-    size_t *rbytes)
-{
+    (void) fat_node_fini_by_devmap_handle(devmap_handle);
+    fat_idx_fini_by_devmap_handle(devmap_handle);
+    (void) block_cache_fini(devmap_handle);
+    block_fini(devmap_handle);
+
+    async_answer_0(rid, EOK);
+}
+
+void fat_unmount(ipc_callid_t rid, ipc_call_t *request)
+{
+    libfs_unmount(&fat_libfs_ops, rid, request);
+}
+
+void fat_lookup(ipc_callid_t rid, ipc_call_t *request)
+{
+    libfs_lookup(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
+}
+
+void fat_read(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
+    aoff64_t pos =
+        (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
     fs_node_t *fn;
     fat_node_t *nodep;
…
     int rc;

-    rc = fat_node_get(&fn, service_id, index);
-    if (rc != EOK)
-        return rc;
-    if (!fn)
-        return ENOENT;
+    rc = fat_node_get(&fn, devmap_handle, index);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+    if (!fn) {
+        async_answer_0(rid, ENOENT);
+        return;
+    }
     nodep = FAT_NODE(fn);

…
         fat_node_put(fn);
         async_answer_0(callid, EINVAL);
-        return EINVAL;
-    }
-
-    bs = block_bb_get(service_id);
+        async_answer_0(rid, EINVAL);
+        return;
+    }
+
+    bs = block_bb_get(devmap_handle);

     if (nodep->type == FAT_FILE) {
…
             fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         (void) async_data_read_finalize(callid,
…
         if (rc != EOK) {
             fat_node_put(fn);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
     }
     } else {
+        unsigned bnum;
         aoff64_t spos = pos;
-        char name[FAT_LFN_NAME_SIZE];
+        char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
         fat_dentry_t *d;
…
         assert(BPS(bs) % sizeof(fat_dentry_t) == 0);

-        fat_directory_t di;
-        rc = fat_directory_open(nodep, &di);
-        if (rc != EOK)
-            goto err;
-        rc = fat_directory_seek(&di, pos);
-        if (rc != EOK) {
-            (void) fat_directory_close(&di);
-            goto err;
-        }
-
-        rc = fat_directory_read(&di, name, &d);
-        if (rc == EOK)
-            goto hit;
-        if (rc == ENOENT)
-            goto miss;
+        /*
+         * Our strategy for readdir() is to use the position pointer as
+         * an index into the array of all dentries. On entry, it points
+         * to the first unread dentry. If we skip any dentries, we bump
+         * the position pointer accordingly.
+         */
+        bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
+        while (bnum < nodep->size / BPS(bs)) {
+            aoff64_t o;
+
+            rc = fat_block_get(&b, bs, nodep, bnum,
+                BLOCK_FLAGS_NONE);
+            if (rc != EOK)
+                goto err;
+            for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
+                o < BPS(bs) / sizeof(fat_dentry_t);
+                o++, pos++) {
+                d = ((fat_dentry_t *)b->data) + o;
+                switch (fat_classify_dentry(d)) {
+                case FAT_DENTRY_SKIP:
+                case FAT_DENTRY_FREE:
+                    continue;
+                case FAT_DENTRY_LAST:
+                    rc = block_put(b);
+                    if (rc != EOK)
+                        goto err;
+                    goto miss;
+                default:
+                case FAT_DENTRY_VALID:
+                    fat_dentry_name_get(d, name);
+                    rc = block_put(b);
+                    if (rc != EOK)
+                        goto err;
+                    goto hit;
+                }
+            }
+            rc = block_put(b);
+            if (rc != EOK)
+                goto err;
+            bnum++;
+        }
+miss:
+        rc = fat_node_put(fn);
+        async_answer_0(callid, rc != EOK ? rc : ENOENT);
+        async_answer_1(rid, rc != EOK ? rc : ENOENT, 0);
+        return;

 err:
         (void) fat_node_put(fn);
         async_answer_0(callid, rc);
-        return rc;
-
-miss:
-        rc = fat_directory_close(&di);
-        if (rc != EOK)
-            goto err;
-        rc = fat_node_put(fn);
-        async_answer_0(callid, rc != EOK ? rc : ENOENT);
-        *rbytes = 0;
-        return rc != EOK ? rc : ENOENT;
+        async_answer_0(rid, rc);
+        return;

 hit:
-        pos = di.pos;
-        rc = fat_directory_close(&di);
-        if (rc != EOK)
-            goto err;
-        (void) async_data_read_finalize(callid, name,
-            str_size(name) + 1);
+        (void) async_data_read_finalize(callid, name, str_size(name) + 1);
         bytes = (pos - spos) + 1;
     }

     rc = fat_node_put(fn);
-    *rbytes = bytes;
-    return rc;
-}
-
-static int
-fat_write(service_id_t service_id, fs_index_t index, aoff64_t pos,
-    size_t *wbytes, aoff64_t *nsize)
-{
+    async_answer_1(rid, rc, (sysarg_t)bytes);
+}
+
+void fat_write(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
+    aoff64_t pos =
+        (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
     fs_node_t *fn;
     fat_node_t *nodep;
     fat_bs_t *bs;
-    size_t bytes;
+    size_t bytes, size;
     block_t *b;
     aoff64_t boundary;
…
     int rc;

-    rc = fat_node_get(&fn, service_id, index);
-    if (rc != EOK)
-        return rc;
-    if (!fn)
-        return ENOENT;
+    rc = fat_node_get(&fn, devmap_handle, index);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+    if (!fn) {
+        async_answer_0(rid, ENOENT);
+        return;
+    }
     nodep = FAT_NODE(fn);

     ipc_callid_t callid;
     size_t len;
     if (!async_data_write_receive(&callid, &len)) {
         (void) fat_node_put(fn);
         async_answer_0(callid, EINVAL);
-        return EINVAL;
-    }
-
-    bs = block_bb_get(service_id);
+        async_answer_0(rid, EINVAL);
+        return;
+    }
+
+    bs = block_bb_get(devmap_handle);

     /*
…
      * but this one greatly simplifies fat_write(). Note that we can afford
      * to do this because the client must be ready to handle the return
      * value signalizing a smaller number of bytes written.
      */
     bytes = min(len, BPS(bs) - pos % BPS(bs));
     if (bytes == BPS(bs))
         flags |= BLOCK_FLAGS_NOREAD;

     boundary = ROUND_UP(nodep->size, BPC(bs));
     if (pos < boundary) {
…
             (void) fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
…
             (void) fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         (void) async_data_write_finalize(callid,
…
         if (rc != EOK) {
             (void) fat_node_put(fn);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         if (pos + bytes > nodep->size) {
…
             nodep->dirty = true;    /* need to sync node */
         }
-        *wbytes = bytes;
-        *nsize = nodep->size;
+        size = nodep->size;
         rc = fat_node_put(fn);
-        return rc;
+        async_answer_2(rid, rc, bytes, nodep->size);
+        return;
     } else {
         /*
…
          */
         unsigned nclsts;
         fat_cluster_t mcl, lcl;

         nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
         /* create an independent chain of nclsts clusters in all FATs */
-        rc = fat_alloc_clusters(bs, service_id, nclsts, &mcl, &lcl);
+        rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
         if (rc != EOK) {
             /* could not allocate a chain of nclsts clusters */
             (void) fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         /* zero fill any gaps */
         rc = fat_fill_gap(bs, nodep, mcl, pos);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, service_id, mcl);
+            (void) fat_free_clusters(bs, devmap_handle, mcl);
             (void) fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
-        }
-        rc = _fat_block_get(&b, bs, service_id, lcl, NULL,
+            async_answer_0(rid, rc);
+            return;
+        }
+        rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
             (pos / BPS(bs)) % SPC(bs), flags);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, service_id, mcl);
+            (void) fat_free_clusters(bs, devmap_handle, mcl);
             (void) fat_node_put(fn);
             async_answer_0(callid, rc);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         (void) async_data_write_finalize(callid,
…
         rc = block_put(b);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, service_id, mcl);
+            (void) fat_free_clusters(bs, devmap_handle, mcl);
             (void) fat_node_put(fn);
-            return rc;
+            async_answer_0(rid, rc);
+            return;
         }
         /*
…
         rc = fat_append_clusters(bs, nodep, mcl, lcl);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, service_id, mcl);
+            (void) fat_free_clusters(bs, devmap_handle, mcl);
             (void) fat_node_put(fn);
-            return rc;
-        }
-        *nsize = nodep->size = pos + bytes;
+            async_answer_0(rid, rc);
+            return;
+        }
+        nodep->size = size = pos + bytes;
+        nodep->dirty = true;    /* need to sync node */
         rc = fat_node_put(fn);
-        nodep->dirty = true;    /* need to sync node */
-        *wbytes = bytes;
-        return rc;
-    }
-}
-
-static int
-fat_truncate(service_id_t service_id, fs_index_t index, aoff64_t size)
-{
+        async_answer_2(rid, rc, bytes, size);
+        return;
+    }
+}
+
+void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
+    aoff64_t size =
+        (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request));
     fs_node_t *fn;
     fat_node_t *nodep;
…
     int rc;

-    rc = fat_node_get(&fn, service_id, index);
-    if (rc != EOK)
-        return rc;
-    if (!fn)
-        return ENOENT;
+    rc = fat_node_get(&fn, devmap_handle, index);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+    if (!fn) {
+        async_answer_0(rid, ENOENT);
+        return;
+    }
     nodep = FAT_NODE(fn);

-    bs = block_bb_get(service_id);
+    bs = block_bb_get(devmap_handle);

     if (nodep->size == size) {
…
         nodep->size = size;
         nodep->dirty = true;    /* need to sync node */
         rc = EOK;
     } else {
         /*
…
         } else {
             fat_cluster_t lastc;
-            rc = fat_cluster_walk(bs, service_id, nodep->firstc,
+            rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
                 &lastc, NULL, (size - 1) / BPC(bs));
             if (rc != EOK)
…
         nodep->size = size;
         nodep->dirty = true;    /* need to sync node */
         rc = EOK;
     }
 out:
     fat_node_put(fn);
-    return rc;
-}
-
-static int fat_close(service_id_t service_id, fs_index_t index)
-{
-    return EOK;
-}
-
-static int fat_destroy(service_id_t service_id, fs_index_t index)
-{
+    async_answer_0(rid, rc);
+    return;
+}
+
+void fat_close(ipc_callid_t rid, ipc_call_t *request)
+{
+    async_answer_0(rid, EOK);
+}
+
+void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
+    fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
     fs_node_t *fn;
     fat_node_t *nodep;
     int rc;

-    rc = fat_node_get(&fn, service_id, index);
-    if (rc != EOK)
-        return rc;
-    if (!fn)
-        return ENOENT;
+    rc = fat_node_get(&fn, devmap_handle, index);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+    if (!fn) {
+        async_answer_0(rid, ENOENT);
+        return;
+    }

     nodep = FAT_NODE(fn);
…

     rc = fat_destroy_node(fn);
-    return rc;
-}
-
-static int fat_sync(service_id_t service_id, fs_index_t index)
-{
+    async_answer_0(rid, rc);
+}
+
+void fat_open_node(ipc_callid_t rid, ipc_call_t *request)
+{
+    libfs_open_node(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
+}
+
+void fat_stat(ipc_callid_t rid, ipc_call_t *request)
+{
+    libfs_stat(&fat_libfs_ops, fat_reg.fs_handle, rid, request);
+}
+
+void fat_sync(ipc_callid_t rid, ipc_call_t *request)
+{
+    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
+
     fs_node_t *fn;
-    int rc = fat_node_get(&fn, service_id, index);
-    if (rc != EOK)
-        return rc;
-    if (!fn)
-        return ENOENT;
-
+    int rc = fat_node_get(&fn, devmap_handle, index);
+    if (rc != EOK) {
+        async_answer_0(rid, rc);
+        return;
+    }
+    if (!fn) {
+        async_answer_0(rid, ENOENT);
+        return;
+    }
+
     fat_node_t *nodep = FAT_NODE(fn);

     nodep->dirty = true;
     rc = fat_node_sync(nodep);

     fat_node_put(fn);
-    return rc;
-}
-
-vfs_out_ops_t fat_ops = {
-    .mounted = fat_mounted,
-    .unmounted = fat_unmounted,
-    .read = fat_read,
-    .write = fat_write,
-    .truncate = fat_truncate,
-    .close = fat_close,
-    .destroy = fat_destroy,
-    .sync = fat_sync,
-};
+    async_answer_0(rid, rc);
+}

 /**
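The readdir path added to fat_read() in this changeset treats the VFS position pointer as an index into the directory's array of dentries and converts it into a block number plus an in-block slot. The following is a minimal illustrative sketch of that mapping, not part of the changeset; the 512-byte sector size and 32-byte dentry size are assumed example values.

#include <stdint.h>
#include <stdio.h>

/* Assumed example values: 512-byte sectors, 32-byte FAT directory entries. */
#define BPS_ASSUMED 512
#define DENTRY_SIZE 32

int main(void)
{
    uint64_t pos = 20; /* index of the first unread dentry */

    /* Block that holds the dentry, and the dentry's slot within that block. */
    uint64_t bnum = (pos * DENTRY_SIZE) / BPS_ASSUMED;
    uint64_t slot = pos % (BPS_ASSUMED / DENTRY_SIZE);

    printf("dentry %llu -> block %llu, slot %llu\n",
        (unsigned long long) pos, (unsigned long long) bnum,
        (unsigned long long) slot);
    return 0;
}

With 16 dentries per 512-byte block, dentry 20 lands in block 1 at slot 4, which is the same arithmetic the loop in fat_read() uses to resume scanning after skipped entries.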