Changes in uspace/srv/fs/fat/fat_ops.c [991f645:69a60c4] in mainline
Files: 1 edited
Legend:
 unprefixed lines are unchanged context
 - removed (present only in r991f645)
 + added (present only in r69a60c4)
 … unchanged lines omitted
uspace/srv/fs/fat/fat_ops.c
--- uspace/srv/fs/fat/fat_ops.c	(r991f645)
+++ uspace/srv/fs/fat/fat_ops.c	(r69a60c4)

 #define FS_NODE(node)       ((node) ? (node)->bp : NULL)

-#define DPS(bs)     (BPS((bs)) / sizeof(fat_dentry_t))
-#define BPC(bs)     (BPS((bs)) * SPC((bs)))
-
 /** Mutex protecting the list of cached free FAT nodes. */
 static FIBRIL_MUTEX_INITIALIZE(ffn_mutex);
…
  * Forward declarations of FAT libfs operations.
  */
-static int fat_root_get(fs_node_t **, devmap_handle_t);
+static int fat_root_get(fs_node_t **, dev_handle_t);
 static int fat_match(fs_node_t **, fs_node_t *, const char *);
-static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);
+static int fat_node_get(fs_node_t **, dev_handle_t, fs_index_t);
 static int fat_node_open(fs_node_t *);
 static int fat_node_put(fs_node_t *);
-static int fat_create_node(fs_node_t **, devmap_handle_t, int);
+static int fat_create_node(fs_node_t **, dev_handle_t, int);
 static int fat_destroy_node(fs_node_t *);
 static int fat_link(fs_node_t *, fs_node_t *, const char *);
…
 static bool fat_is_directory(fs_node_t *);
 static bool fat_is_file(fs_node_t *node);
-static devmap_handle_t fat_device_get(fs_node_t *node);
+static dev_handle_t fat_device_get(fs_node_t *node);

 /*
…
     node->refcnt = 0;
     node->dirty = false;
-    node->lastc_cached_valid = false;
-    node->lastc_cached_value = FAT_CLST_LAST1;
-    node->currc_cached_valid = false;
-    node->currc_cached_bn = 0;
-    node->currc_cached_value = FAT_CLST_LAST1;
 }

…
     fat_bs_t *bs;
     fat_dentry_t *d;
+    uint16_t bps;
+    unsigned dps;
     int rc;

     assert(node->dirty);

-    bs = block_bb_get(node->idx->devmap_handle);
+    bs = block_bb_get(node->idx->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    dps = bps / sizeof(fat_dentry_t);

     /* Read the block that contains the dentry of interest. */
-    rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc,
-        NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
-        BLOCK_FLAGS_NONE);
+    rc = _fat_block_get(&b, bs, node->idx->dev_handle, node->idx->pfc,
+        (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
     if (rc != EOK)
         return rc;

-    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs));
+    d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps);

     d->firstc = host2uint16_t_le(node->firstc);
…
 }

-static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle)
+static int fat_node_fini_by_dev_handle(dev_handle_t dev_handle)
 {
     link_t *lnk;
…
             goto restart;
         }
-        if (nodep->idx->devmap_handle != devmap_handle) {
+        if (nodep->idx->dev_handle != dev_handle) {
             fibril_mutex_unlock(&nodep->idx->lock);
             fibril_mutex_unlock(&nodep->lock);
…
     fat_dentry_t *d;
     fat_node_t *nodep = NULL;
+    unsigned bps;
+    unsigned spc;
+    unsigned dps;
     int rc;

…
         return rc;

-    bs = block_bb_get(idxp->devmap_handle);
+    bs = block_bb_get(idxp->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    spc = bs->spc;
+    dps = bps / sizeof(fat_dentry_t);

     /* Read the block that contains the dentry of interest. */
-    rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL,
-        (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE);
+    rc = _fat_block_get(&b, bs, idxp->dev_handle, idxp->pfc,
+        (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE);
     if (rc != EOK) {
         (void) fat_node_put(FS_NODE(nodep));
…
     }

-    d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs));
+    d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps);
     if (d->attr & FAT_ATTR_SUBDIR) {
         /*
…
          */
         uint16_t clusters;
-        rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle,
+        rc = fat_clusters_get(&clusters, bs, idxp->dev_handle,
             uint16_t_le2host(d->firstc));
         if (rc != EOK) {
…
             return rc;
         }
-        nodep->size = BPS(bs) * SPC(bs) * clusters;
+        nodep->size = bps * spc * clusters;
     } else {
         nodep->type = FAT_FILE;
…
  */

-int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)
-{
-    return fat_node_get(rfn, devmap_handle, 0);
+int fat_root_get(fs_node_t **rfn, dev_handle_t dev_handle)
+{
+    return fat_node_get(rfn, dev_handle, 0);
 }

…
     char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];
     unsigned i, j;
+    unsigned bps;       /* bytes per sector */
+    unsigned dps;       /* dentries per sector */
     unsigned blocks;
     fat_dentry_t *d;
-    devmap_handle_t devmap_handle;
     block_t *b;
     int rc;

     fibril_mutex_lock(&parentp->idx->lock);
-    devmap_handle = parentp->idx->devmap_handle;
-    fibril_mutex_unlock(&parentp->idx->lock);
-
-    bs = block_bb_get(devmap_handle);
-    blocks = parentp->size / BPS(bs);
+    bs = block_bb_get(parentp->idx->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    dps = bps / sizeof(fat_dentry_t);
+    blocks = parentp->size / bps;
     for (i = 0; i < blocks; i++) {
         rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
-        if (rc != EOK)
+        if (rc != EOK) {
+            fibril_mutex_unlock(&parentp->idx->lock);
             return rc;
-        for (j = 0; j < DPS(bs); j++) {
+        }
+        for (j = 0; j < dps; j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
                 /* miss */
                 rc = block_put(b);
+                fibril_mutex_unlock(&parentp->idx->lock);
                 *rfn = NULL;
                 return rc;
…
                 /* hit */
                 fat_node_t *nodep;
-                fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,
-                    parentp->firstc, i * DPS(bs) + j);
+                /*
+                 * Assume tree hierarchy for locking. We
+                 * already have the parent and now we are going
+                 * to lock the child. Never lock in the oposite
+                 * order.
+                 */
+                fat_idx_t *idx = fat_idx_get_by_pos(
+                    parentp->idx->dev_handle, parentp->firstc,
+                    i * dps + j);
+                fibril_mutex_unlock(&parentp->idx->lock);
                 if (!idx) {
                     /*
…
         }
         rc = block_put(b);
-        if (rc != EOK)
+        if (rc != EOK) {
+            fibril_mutex_unlock(&parentp->idx->lock);
             return rc;
+        }
     }
+
+    fibril_mutex_unlock(&parentp->idx->lock);
     *rfn = NULL;
     return EOK;
…

 /** Instantiate a FAT in-core node. */
-int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)
+int fat_node_get(fs_node_t **rfn, dev_handle_t dev_handle, fs_index_t index)
 {
     fat_node_t *nodep;
…
     int rc;

-    idxp = fat_idx_get_by_index(devmap_handle, index);
+    idxp = fat_idx_get_by_index(dev_handle, index);
     if (!idxp) {
         *rfn = NULL;
…
 }

-int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags)
+int fat_create_node(fs_node_t **rfn, dev_handle_t dev_handle, int flags)
 {
     fat_idx_t *idxp;
…
     fat_bs_t *bs;
     fat_cluster_t mcl, lcl;
+    uint16_t bps;
     int rc;

-    bs = block_bb_get(devmap_handle);
+    bs = block_bb_get(dev_handle);
+    bps = uint16_t_le2host(bs->bps);
     if (flags & L_DIRECTORY) {
         /* allocate a cluster */
-        rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl);
+        rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl);
         if (rc != EOK)
             return rc;
         /* populate the new cluster with unused dentries */
-        rc = fat_zero_cluster(bs, devmap_handle, mcl);
-        if (rc != EOK) {
-            (void) fat_free_clusters(bs, devmap_handle, mcl);
+        rc = fat_zero_cluster(bs, dev_handle, mcl);
+        if (rc != EOK) {
+            (void) fat_free_clusters(bs, dev_handle, mcl);
             return rc;
         }
…
     rc = fat_node_get_new(&nodep);
     if (rc != EOK) {
-        (void) fat_free_clusters(bs, devmap_handle, mcl);
+        (void) fat_free_clusters(bs, dev_handle, mcl);
         return rc;
     }
-    rc = fat_idx_get_new(&idxp, devmap_handle);
-    if (rc != EOK) {
-        (void) fat_free_clusters(bs, devmap_handle, mcl);
+    rc = fat_idx_get_new(&idxp, dev_handle);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, dev_handle, mcl);
         (void) fat_node_put(FS_NODE(nodep));
         return rc;
…
         nodep->type = FAT_DIRECTORY;
         nodep->firstc = mcl;
-        nodep->size = BPS(bs) * SPC(bs);
+        nodep->size = bps * bs->spc;
     } else {
         nodep->type = FAT_FILE;
…
     assert(!has_children);

-    bs = block_bb_get(nodep->idx->devmap_handle);
+    bs = block_bb_get(nodep->idx->dev_handle);
     if (nodep->firstc != FAT_CLST_RES0) {
         assert(nodep->size);
         /* Free all clusters allocated to the node. */
-        rc = fat_free_clusters(bs, nodep->idx->devmap_handle,
+        rc = fat_free_clusters(bs, nodep->idx->dev_handle,
             nodep->firstc);
     }
…
     block_t *b;
     unsigned i, j;
+    uint16_t bps;
+    unsigned dps;
     unsigned blocks;
     fat_cluster_t mcl, lcl;
…

     fibril_mutex_lock(&parentp->idx->lock);
-    bs = block_bb_get(parentp->idx->devmap_handle);
-
-    blocks = parentp->size / BPS(bs);
+    bs = block_bb_get(parentp->idx->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    dps = bps / sizeof(fat_dentry_t);
+
+    blocks = parentp->size / bps;

     for (i = 0; i < blocks; i++) {
…
             return rc;
         }
-        for (j = 0; j < DPS(bs); j++) {
+        for (j = 0; j < dps; j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
         return ENOSPC;
     }
-    rc = fat_alloc_clusters(bs, parentp->idx->devmap_handle, 1, &mcl, &lcl);
+    rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl);
     if (rc != EOK) {
         fibril_mutex_unlock(&parentp->idx->lock);
         return rc;
     }
-    rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl);
-    if (rc != EOK) {
-        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
+    rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
         fibril_mutex_unlock(&parentp->idx->lock);
         return rc;
     }
-    rc = fat_append_clusters(bs, parentp, mcl, lcl);
-    if (rc != EOK) {
-        (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl);
+    rc = fat_append_clusters(bs, parentp, mcl);
+    if (rc != EOK) {
+        (void) fat_free_clusters(bs, parentp->idx->dev_handle, mcl);
         fibril_mutex_unlock(&parentp->idx->lock);
         return rc;
     }
-    parentp->size += BPS(bs) * SPC(bs);
+    parentp->size += bps * bs->spc;
     parentp->dirty = true;      /* need to sync node */
     rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);
…

     childp->idx->pfc = parentp->firstc;
-    childp->idx->pdi = i * DPS(bs) + j;
+    childp->idx->pdi = i * dps + j;
     fibril_mutex_unlock(&childp->idx->lock);
…
     fat_bs_t *bs;
     fat_dentry_t *d;
+    uint16_t bps;
     block_t *b;
     bool has_children;
…
     assert(childp->lnkcnt == 1);
     fibril_mutex_lock(&childp->idx->lock);
-    bs = block_bb_get(childp->idx->devmap_handle);
-
-    rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc,
-        NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs),
+    bs = block_bb_get(childp->idx->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+
+    rc = _fat_block_get(&b, bs, childp->idx->dev_handle, childp->idx->pfc,
+        (childp->idx->pdi * sizeof(fat_dentry_t)) / bps,
         BLOCK_FLAGS_NONE);
     if (rc != EOK)
         goto error;
     d = (fat_dentry_t *)b->data +
-        (childp->idx->pdi % (BPS(bs) / sizeof(fat_dentry_t)));
+        (childp->idx->pdi % (bps / sizeof(fat_dentry_t)));
     /* mark the dentry as not-currently-used */
     d->name[0] = FAT_DENTRY_ERASED;
…
     fat_bs_t *bs;
     fat_node_t *nodep = FAT_NODE(fn);
+    unsigned bps;
+    unsigned dps;
     unsigned blocks;
     block_t *b;
…

     fibril_mutex_lock(&nodep->idx->lock);
-    bs = block_bb_get(nodep->idx->devmap_handle);
-
-    blocks = nodep->size / BPS(bs);
+    bs = block_bb_get(nodep->idx->dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    dps = bps / sizeof(fat_dentry_t);
+
+    blocks = nodep->size / bps;

     for (i = 0; i < blocks; i++) {
…
             return rc;
         }
-        for (j = 0; j < DPS(bs); j++) {
+        for (j = 0; j < dps; j++) {
             d = ((fat_dentry_t *)b->data) + j;
             switch (fat_classify_dentry(d)) {
…
 }

-devmap_handle_t fat_device_get(fs_node_t *node)
+dev_handle_t fat_device_get(fs_node_t *node)
 {
     return 0;
…
 void fat_mounted(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     enum cache_mode cmode;
     fat_bs_t *bs;
+    uint16_t bps;
+    uint16_t rde;

     /* Accept the mount options */
…

     /* initialize libblock */
-    rc = block_init(devmap_handle, BS_SIZE);
+    rc = block_init(dev_handle, BS_SIZE);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…

     /* prepare the boot block */
-    rc = block_bb_read(devmap_handle, BS_BLOCK);
-    if (rc != EOK) {
-        block_fini(devmap_handle);
+    rc = block_bb_read(dev_handle, BS_BLOCK);
+    if (rc != EOK) {
+        block_fini(dev_handle);
         ipc_answer_0(rid, rc);
         return;
…

     /* get the buffer with the boot sector */
-    bs = block_bb_get(devmap_handle);
-
-    if (BPS(bs) != BS_SIZE) {
-        block_fini(devmap_handle);
+    bs = block_bb_get(dev_handle);
+
+    /* Read the number of root directory entries. */
+    bps = uint16_t_le2host(bs->bps);
+    rde = uint16_t_le2host(bs->root_ent_max);
+
+    if (bps != BS_SIZE) {
+        block_fini(dev_handle);
         ipc_answer_0(rid, ENOTSUP);
         return;
…

     /* Initialize the block cache */
-    rc = block_cache_init(devmap_handle, BPS(bs), 0 /* XXX */, cmode);
-    if (rc != EOK) {
-        block_fini(devmap_handle);
+    rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode);
+    if (rc != EOK) {
+        block_fini(dev_handle);
         ipc_answer_0(rid, rc);
         return;
…

     /* Do some simple sanity checks on the file system. */
-    rc = fat_sanity_check(bs, devmap_handle);
-    if (rc != EOK) {
-        (void) block_cache_fini(devmap_handle);
-        block_fini(devmap_handle);
+    rc = fat_sanity_check(bs, dev_handle);
+    if (rc != EOK) {
+        (void) block_cache_fini(dev_handle);
+        block_fini(dev_handle);
         ipc_answer_0(rid, rc);
         return;
     }

-    rc = fat_idx_init_by_devmap_handle(devmap_handle);
-    if (rc != EOK) {
-        (void) block_cache_fini(devmap_handle);
-        block_fini(devmap_handle);
+    rc = fat_idx_init_by_dev_handle(dev_handle);
+    if (rc != EOK) {
+        (void) block_cache_fini(dev_handle);
+        block_fini(dev_handle);
         ipc_answer_0(rid, rc);
         return;
…
     fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t));
     if (!rfn) {
-        (void) block_cache_fini(devmap_handle);
-        block_fini(devmap_handle);
-        fat_idx_fini_by_devmap_handle(devmap_handle);
+        (void) block_cache_fini(dev_handle);
+        block_fini(dev_handle);
+        fat_idx_fini_by_dev_handle(dev_handle);
         ipc_answer_0(rid, ENOMEM);
         return;
…
     if (!rootp) {
         free(rfn);
-        (void) block_cache_fini(devmap_handle);
-        block_fini(devmap_handle);
-        fat_idx_fini_by_devmap_handle(devmap_handle);
+        (void) block_cache_fini(dev_handle);
+        block_fini(dev_handle);
+        fat_idx_fini_by_dev_handle(dev_handle);
         ipc_answer_0(rid, ENOMEM);
         return;
…
     fat_node_initialize(rootp);

-    fat_idx_t *ridxp = fat_idx_get_by_pos(devmap_handle, FAT_CLST_ROOTPAR, 0);
+    fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0);
     if (!ridxp) {
         free(rfn);
         free(rootp);
-        (void) block_cache_fini(devmap_handle);
-        block_fini(devmap_handle);
-        fat_idx_fini_by_devmap_handle(devmap_handle);
+        (void) block_cache_fini(dev_handle);
+        block_fini(dev_handle);
+        fat_idx_fini_by_dev_handle(dev_handle);
         ipc_answer_0(rid, ENOMEM);
         return;
…
     rootp->refcnt = 1;
     rootp->lnkcnt = 0;  /* FS root is not linked */
-    rootp->size = RDE(bs) * sizeof(fat_dentry_t);
+    rootp->size = rde * sizeof(fat_dentry_t);
     rootp->idx = ridxp;
     ridxp->nodep = rootp;
…
 void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     fs_node_t *fn;
     fat_node_t *nodep;
     int rc;

-    rc = fat_root_get(&fn, devmap_handle);
+    rc = fat_root_get(&fn, dev_handle);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…
      * stop using libblock for this instance.
      */
-    (void) fat_node_fini_by_devmap_handle(devmap_handle);
-    fat_idx_fini_by_devmap_handle(devmap_handle);
-    (void) block_cache_fini(devmap_handle);
-    block_fini(devmap_handle);
+    (void) fat_node_fini_by_dev_handle(dev_handle);
+    fat_idx_fini_by_dev_handle(dev_handle);
+    (void) block_cache_fini(dev_handle);
+    block_fini(dev_handle);

     ipc_answer_0(rid, EOK);
…
 void fat_read(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
     aoff64_t pos =
…
     fat_node_t *nodep;
     fat_bs_t *bs;
+    uint16_t bps;
     size_t bytes;
     block_t *b;
     int rc;

-    rc = fat_node_get(&fn, devmap_handle, index);
+    rc = fat_node_get(&fn, dev_handle, index);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…
     }

-    bs = block_bb_get(devmap_handle);
+    bs = block_bb_get(dev_handle);
+    bps = uint16_t_le2host(bs->bps);

     if (nodep->type == FAT_FILE) {
…
             (void) async_data_read_finalize(callid, NULL, 0);
         } else {
-            bytes = min(len, BPS(bs) - pos % BPS(bs));
+            bytes = min(len, bps - pos % bps);
             bytes = min(bytes, nodep->size - pos);
-            rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),
+            rc = fat_block_get(&b, bs, nodep, pos / bps,
                 BLOCK_FLAGS_NONE);
             if (rc != EOK) {
…
                 return;
             }
-            (void) async_data_read_finalize(callid,
-                b->data + pos % BPS(bs), bytes);
+            (void) async_data_read_finalize(callid, b->data + pos % bps,
+                bytes);
             rc = block_put(b);
             if (rc != EOK) {
…

         assert(nodep->type == FAT_DIRECTORY);
-        assert(nodep->size % BPS(bs) == 0);
-        assert(BPS(bs) % sizeof(fat_dentry_t) == 0);
+        assert(nodep->size % bps == 0);
+        assert(bps % sizeof(fat_dentry_t) == 0);

         /*
…
          * the position pointer accordingly.
          */
-        bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs);
-        while (bnum < nodep->size / BPS(bs)) {
+        bnum = (pos * sizeof(fat_dentry_t)) / bps;
+        while (bnum < nodep->size / bps) {
             aoff64_t o;
…
             if (rc != EOK)
                 goto err;
-            for (o = pos % (BPS(bs) / sizeof(fat_dentry_t));
-                o < BPS(bs) / sizeof(fat_dentry_t);
+            for (o = pos % (bps / sizeof(fat_dentry_t));
+                o < bps / sizeof(fat_dentry_t);
                 o++, pos++) {
                 d = ((fat_dentry_t *)b->data) + o;
…
 void fat_write(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
     aoff64_t pos =
…
     size_t bytes, size;
     block_t *b;
+    uint16_t bps;
+    unsigned spc;
+    unsigned bpc;       /* bytes per cluster */
     aoff64_t boundary;
     int flags = BLOCK_FLAGS_NONE;
     int rc;

-    rc = fat_node_get(&fn, devmap_handle, index);
+    rc = fat_node_get(&fn, dev_handle, index);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…
     }

-    bs = block_bb_get(devmap_handle);
+    bs = block_bb_get(dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    spc = bs->spc;
+    bpc = bps * spc;

     /*
…
      * value signalizing a smaller number of bytes written.
      */
-    bytes = min(len, BPS(bs) - pos % BPS(bs));
-    if (bytes == BPS(bs))
+    bytes = min(len, bps - pos % bps);
+    if (bytes == bps)
         flags |= BLOCK_FLAGS_NOREAD;

-    boundary = ROUND_UP(nodep->size, BPC(bs));
+    boundary = ROUND_UP(nodep->size, bpc);
     if (pos < boundary) {
         /*
…
             return;
         }
-        rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags);
+        rc = fat_block_get(&b, bs, nodep, pos / bps, flags);
         if (rc != EOK) {
             (void) fat_node_put(fn);
…
             return;
         }
-        (void) async_data_write_finalize(callid,
-            b->data + pos % BPS(bs), bytes);
+        (void) async_data_write_finalize(callid, b->data + pos % bps,
+            bytes);
         b->dirty = true;        /* need to sync block */
         rc = block_put(b);
…
         fat_cluster_t mcl, lcl;

-        nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);
+        nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc;
         /* create an independent chain of nclsts clusters in all FATs */
-        rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);
+        rc = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl);
         if (rc != EOK) {
             /* could not allocate a chain of nclsts clusters */
…
         rc = fat_fill_gap(bs, nodep, mcl, pos);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, devmap_handle, mcl);
+            (void) fat_free_clusters(bs, dev_handle, mcl);
             (void) fat_node_put(fn);
             ipc_answer_0(callid, rc);
…
             return;
         }
-        rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL,
-            (pos / BPS(bs)) % SPC(bs), flags);
+        rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc,
+            flags);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, devmap_handle, mcl);
+            (void) fat_free_clusters(bs, dev_handle, mcl);
             (void) fat_node_put(fn);
             ipc_answer_0(callid, rc);
…
             return;
         }
-        (void) async_data_write_finalize(callid,
-            b->data + pos % BPS(bs), bytes);
+        (void) async_data_write_finalize(callid, b->data + pos % bps,
+            bytes);
         b->dirty = true;        /* need to sync block */
         rc = block_put(b);
         if (rc != EOK) {
-            (void) fat_free_clusters(bs, devmap_handle, mcl);
+            (void) fat_free_clusters(bs, dev_handle, mcl);
             (void) fat_node_put(fn);
             ipc_answer_0(rid, rc);
…
          * node's cluster chain.
          */
-        rc = fat_append_clusters(bs, nodep, mcl, lcl);
-        if (rc != EOK) {
-            (void) fat_free_clusters(bs, devmap_handle, mcl);
+        rc = fat_append_clusters(bs, nodep, mcl);
+        if (rc != EOK) {
+            (void) fat_free_clusters(bs, dev_handle, mcl);
             (void) fat_node_put(fn);
             ipc_answer_0(rid, rc);
…
 void fat_truncate(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);
     aoff64_t size =
…
     fat_node_t *nodep;
     fat_bs_t *bs;
+    uint16_t bps;
+    uint8_t spc;
+    unsigned bpc;       /* bytes per cluster */
     int rc;

-    rc = fat_node_get(&fn, devmap_handle, index);
+    rc = fat_node_get(&fn, dev_handle, index);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…
     nodep = FAT_NODE(fn);

-    bs = block_bb_get(devmap_handle);
+    bs = block_bb_get(dev_handle);
+    bps = uint16_t_le2host(bs->bps);
+    spc = bs->spc;
+    bpc = bps * spc;

     if (nodep->size == size) {
…
          */
         rc = EINVAL;
-    } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {
+    } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) {
         /*
          * The node will be shrunk, but no clusters will be deallocated.
…
     } else {
         fat_cluster_t lastc;
-        rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
-            &lastc, NULL, (size - 1) / BPC(bs));
+        rc = fat_cluster_walk(bs, dev_handle, nodep->firstc,
+            &lastc, NULL, (size - 1) / bpc);
         if (rc != EOK)
             goto out;
…
 void fat_destroy(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request);
     fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request);
     fs_node_t *fn;
     int rc;

-    rc = fat_node_get(&fn, devmap_handle, index);
+    rc = fat_node_get(&fn, dev_handle, index);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
…
 void fat_sync(ipc_callid_t rid, ipc_call_t *request)
 {
-    devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);
+    dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request);
     fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request);

     fs_node_t *fn;
-    int rc = fat_node_get(&fn, devmap_handle, index);
+    int rc = fat_node_get(&fn, dev_handle, index);
     if (rc != EOK) {
         ipc_answer_0(rid, rc);
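Most of the non-rename changes above replace the boot-sector geometry macros (BPS, SPC, DPS, BPC, RDE) with per-function locals read from fat_bs_t. The sketch below is not part of the changeset; it only illustrates the arithmetic those lines rely on. The struct fat_bs_geom type, the DENTRY_SIZE constant and the stubbed uint16_t_le2host() are simplified stand-ins for the HelenOS definitions, and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>

#define DENTRY_SIZE 32  /* stand-in for sizeof(fat_dentry_t) */

/* Stand-in for the <byteorder.h> conversion; assumes a little-endian host. */
static uint16_t uint16_t_le2host(uint16_t v)
{
    return v;
}

/* Only the boot-sector fields used below; simplified from fat_bs_t. */
struct fat_bs_geom {
    uint16_t bps;           /* bytes per sector (little-endian on disk) */
    uint8_t spc;            /* sectors per cluster */
    uint16_t root_ent_max;  /* maximum number of root directory entries */
};

int main(void)
{
    struct fat_bs_geom bs = { .bps = 512, .spc = 8, .root_ent_max = 224 };

    /* The per-function locals introduced in r69a60c4; the removed macros
     * BPS/SPC/DPS/BPC/RDE computed the same values on every use. */
    unsigned bps = uint16_t_le2host(bs.bps);        /* bytes per sector */
    unsigned spc = bs.spc;                          /* sectors per cluster */
    unsigned dps = bps / DENTRY_SIZE;               /* dentries per sector */
    unsigned bpc = bps * spc;                       /* bytes per cluster */
    unsigned rde = uint16_t_le2host(bs.root_ent_max);

    /* Locating a dentry by its index (pdi), as fat_node_sync() and
     * fat_unlink() do above: which block of the parent holds it and at
     * which slot within that block. */
    unsigned pdi = 37;                              /* example dentry index */
    unsigned blk = (pdi * DENTRY_SIZE) / bps;       /* relative block number */
    unsigned slot = pdi % dps;                      /* dentry slot in block */

    printf("bpc=%u, root dir = %u dentries = %u bytes\n",
        bpc, rde, rde * DENTRY_SIZE);
    printf("dentry %u -> block %u, slot %u\n", pdi, blk, slot);
    return 0;
}

Reading bps once per function and deriving dps and bpc from it yields the same numbers the deleted DPS() and BPC() macros produced; the only practical difference is that the little-endian bps field is converted a single time instead of on every use.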