Changes in uspace/srv/fs/fat/fat_ops.c [5ca5eaa7:d44aabd] in mainline
- File: 1 edited
uspace/srv/fs/fat/fat_ops.c
r5ca5eaa7 rd44aabd 42 42 #include <libfs.h> 43 43 #include <libblock.h> 44 #include <ipc/ipc.h> 44 45 #include <ipc/services.h> 45 46 #include <ipc/devmap.h> 46 #include <macros.h>47 47 #include <async.h> 48 48 #include <errno.h> 49 #include <str .h>49 #include <string.h> 50 50 #include <byteorder.h> 51 51 #include <adt/hash_table.h> 52 52 #include <adt/list.h> 53 53 #include <assert.h> 54 #include <fibril_sync h.h>54 #include <fibril_sync.h> 55 55 #include <sys/mman.h> 56 56 #include <align.h> … … 59 59 #define FS_NODE(node) ((node) ? (node)->bp : NULL) 60 60 61 #define DPS(bs) (BPS((bs)) / sizeof(fat_dentry_t))62 #define BPC(bs) (BPS((bs)) * SPC((bs)))63 64 61 /** Mutex protecting the list of cached free FAT nodes. */ 65 62 static FIBRIL_MUTEX_INITIALIZE(ffn_mutex); … … 68 65 static LIST_INITIALIZE(ffn_head); 69 66 70 /*71 * Forward declarations of FAT libfs operations.72 */73 static int fat_root_get(fs_node_t **, devmap_handle_t);74 static int fat_match(fs_node_t **, fs_node_t *, const char *);75 static int fat_node_get(fs_node_t **, devmap_handle_t, fs_index_t);76 static int fat_node_open(fs_node_t *);77 static int fat_node_put(fs_node_t *);78 static int fat_create_node(fs_node_t **, devmap_handle_t, int);79 static int fat_destroy_node(fs_node_t *);80 static int fat_link(fs_node_t *, fs_node_t *, const char *);81 static int fat_unlink(fs_node_t *, fs_node_t *, const char *);82 static int fat_has_children(bool *, fs_node_t *);83 static fs_index_t fat_index_get(fs_node_t *);84 static aoff64_t fat_size_get(fs_node_t *);85 static unsigned fat_lnkcnt_get(fs_node_t *);86 static char fat_plb_get_char(unsigned);87 static bool fat_is_directory(fs_node_t *);88 static bool fat_is_file(fs_node_t *node);89 static devmap_handle_t fat_device_get(fs_node_t *node);90 91 /*92 * Helper functions.93 */94 67 static void fat_node_initialize(fat_node_t *node) 95 68 { … … 103 76 node->refcnt = 0; 104 77 node->dirty = false; 105 node->lastc_cached_valid = false; 106 node->lastc_cached_value = FAT_CLST_LAST1; 107 node->currc_cached_valid = false; 108 node->currc_cached_bn = 0; 109 node->currc_cached_value = FAT_CLST_LAST1; 110 } 111 112 static int fat_node_sync(fat_node_t *node) 78 } 79 80 static void fat_node_sync(fat_node_t *node) 113 81 { 114 82 block_t *b; 115 83 fat_bs_t *bs; 116 84 fat_dentry_t *d; 117 int rc; 85 uint16_t bps; 86 unsigned dps; 118 87 119 88 assert(node->dirty); 120 89 121 bs = block_bb_get(node->idx->devmap_handle); 90 bs = block_bb_get(node->idx->dev_handle); 91 bps = uint16_t_le2host(bs->bps); 92 dps = bps / sizeof(fat_dentry_t); 122 93 123 94 /* Read the block that contains the dentry of interest. 
*/ 124 rc = _fat_block_get(&b, bs, node->idx->devmap_handle, node->idx->pfc, 125 NULL, (node->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs), 126 BLOCK_FLAGS_NONE); 127 if (rc != EOK) 128 return rc; 129 130 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % DPS(bs)); 95 b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, 96 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); 97 98 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); 131 99 132 100 d->firstc = host2uint16_t_le(node->firstc); … … 140 108 141 109 b->dirty = true; /* need to sync block */ 142 rc = block_put(b); 143 return rc; 144 } 145 146 static int fat_node_fini_by_devmap_handle(devmap_handle_t devmap_handle) 147 { 148 link_t *lnk; 149 fat_node_t *nodep; 150 int rc; 151 152 /* 153 * We are called from fat_unmounted() and assume that there are already 154 * no nodes belonging to this instance with non-zero refcount. Therefore 155 * it is sufficient to clean up only the FAT free node list. 156 */ 157 158 restart: 159 fibril_mutex_lock(&ffn_mutex); 160 for (lnk = ffn_head.next; lnk != &ffn_head; lnk = lnk->next) { 161 nodep = list_get_instance(lnk, fat_node_t, ffn_link); 162 if (!fibril_mutex_trylock(&nodep->lock)) { 163 fibril_mutex_unlock(&ffn_mutex); 164 goto restart; 165 } 166 if (!fibril_mutex_trylock(&nodep->idx->lock)) { 167 fibril_mutex_unlock(&nodep->lock); 168 fibril_mutex_unlock(&ffn_mutex); 169 goto restart; 170 } 171 if (nodep->idx->devmap_handle != devmap_handle) { 172 fibril_mutex_unlock(&nodep->idx->lock); 173 fibril_mutex_unlock(&nodep->lock); 174 continue; 175 } 176 177 list_remove(&nodep->ffn_link); 178 fibril_mutex_unlock(&ffn_mutex); 179 180 /* 181 * We can unlock the node and its index structure because we are 182 * the last player on this playground and VFS is preventing new 183 * players from entering. 184 */ 185 fibril_mutex_unlock(&nodep->idx->lock); 186 fibril_mutex_unlock(&nodep->lock); 187 188 if (nodep->dirty) { 189 rc = fat_node_sync(nodep); 190 if (rc != EOK) 191 return rc; 192 } 193 nodep->idx->nodep = NULL; 194 free(nodep->bp); 195 free(nodep); 196 197 /* Need to restart because we changed the ffn_head list. */ 198 goto restart; 199 } 200 fibril_mutex_unlock(&ffn_mutex); 201 202 return EOK; 203 } 204 205 static int fat_node_get_new(fat_node_t **nodepp) 110 block_put(b); 111 } 112 113 static fat_node_t *fat_node_get_new(void) 206 114 { 207 115 fs_node_t *fn; 208 116 fat_node_t *nodep; 209 int rc;210 117 211 118 fibril_mutex_lock(&ffn_mutex); … … 223 130 list_remove(&nodep->ffn_link); 224 131 fibril_mutex_unlock(&ffn_mutex); 225 if (nodep->dirty) { 226 rc = fat_node_sync(nodep); 227 if (rc != EOK) { 228 idxp_tmp->nodep = NULL; 229 fibril_mutex_unlock(&nodep->lock); 230 fibril_mutex_unlock(&idxp_tmp->lock); 231 free(nodep->bp); 232 free(nodep); 233 return rc; 234 } 235 } 132 if (nodep->dirty) 133 fat_node_sync(nodep); 236 134 idxp_tmp->nodep = NULL; 237 135 fibril_mutex_unlock(&nodep->lock); … … 244 142 fn = (fs_node_t *)malloc(sizeof(fs_node_t)); 245 143 if (!fn) 246 return ENOMEM;144 return NULL; 247 145 nodep = (fat_node_t *)malloc(sizeof(fat_node_t)); 248 146 if (!nodep) { 249 147 free(fn); 250 return ENOMEM;148 return NULL; 251 149 } 252 150 } … … 256 154 nodep->bp = fn; 257 155 258 *nodepp = nodep; 259 return EOK; 156 return nodep; 260 157 } 261 158 … … 264 161 * @param idxp Locked index structure. 
265 162 */ 266 static int fat_node_get_core(fat_node_t **nodepp,fat_idx_t *idxp)163 static fat_node_t *fat_node_get_core(fat_idx_t *idxp) 267 164 { 268 165 block_t *b; … … 270 167 fat_dentry_t *d; 271 168 fat_node_t *nodep = NULL; 272 int rc; 169 unsigned bps; 170 unsigned spc; 171 unsigned dps; 273 172 274 173 if (idxp->nodep) { … … 278 177 */ 279 178 fibril_mutex_lock(&idxp->nodep->lock); 280 if (!idxp->nodep->refcnt++) { 281 fibril_mutex_lock(&ffn_mutex); 179 if (!idxp->nodep->refcnt++) 282 180 list_remove(&idxp->nodep->ffn_link); 283 fibril_mutex_unlock(&ffn_mutex);284 }285 181 fibril_mutex_unlock(&idxp->nodep->lock); 286 *nodepp = idxp->nodep; 287 return EOK; 182 return idxp->nodep; 288 183 } 289 184 … … 294 189 assert(idxp->pfc); 295 190 296 rc = fat_node_get_new(&nodep); 297 if (rc != EOK) 298 return rc; 299 300 bs = block_bb_get(idxp->devmap_handle); 191 nodep = fat_node_get_new(); 192 if (!nodep) 193 return NULL; 194 195 bs = block_bb_get(idxp->dev_handle); 196 bps = uint16_t_le2host(bs->bps); 197 spc = bs->spc; 198 dps = bps / sizeof(fat_dentry_t); 301 199 302 200 /* Read the block that contains the dentry of interest. */ 303 rc = _fat_block_get(&b, bs, idxp->devmap_handle, idxp->pfc, NULL, 304 (idxp->pdi * sizeof(fat_dentry_t)) / BPS(bs), BLOCK_FLAGS_NONE); 305 if (rc != EOK) { 306 (void) fat_node_put(FS_NODE(nodep)); 307 return rc; 308 } 309 310 d = ((fat_dentry_t *)b->data) + (idxp->pdi % DPS(bs)); 201 b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, 202 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); 203 assert(b); 204 205 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); 311 206 if (d->attr & FAT_ATTR_SUBDIR) { 312 207 /* … … 321 216 * size of the directory by walking the FAT. 322 217 */ 323 uint16_t clusters; 324 rc = fat_clusters_get(&clusters, bs, idxp->devmap_handle, 218 nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle, 325 219 uint16_t_le2host(d->firstc)); 326 if (rc != EOK) {327 (void) block_put(b);328 (void) fat_node_put(FS_NODE(nodep));329 return rc;330 }331 nodep->size = BPS(bs) * SPC(bs) * clusters;332 220 } else { 333 221 nodep->type = FAT_FILE; … … 338 226 nodep->refcnt = 1; 339 227 340 rc = block_put(b); 341 if (rc != EOK) { 342 (void) fat_node_put(FS_NODE(nodep)); 343 return rc; 344 } 228 block_put(b); 345 229 346 230 /* Link the idx structure with the node structure. */ … … 348 232 idxp->nodep = nodep; 349 233 350 *nodepp = nodep; 351 return EOK; 352 } 234 return nodep; 235 } 236 237 /* 238 * Forward declarations of FAT libfs operations. 
239 */ 240 static fs_node_t *fat_node_get(dev_handle_t, fs_index_t); 241 static void fat_node_put(fs_node_t *); 242 static fs_node_t *fat_create_node(dev_handle_t, int); 243 static int fat_destroy_node(fs_node_t *); 244 static int fat_link(fs_node_t *, fs_node_t *, const char *); 245 static int fat_unlink(fs_node_t *, fs_node_t *, const char *); 246 static fs_node_t *fat_match(fs_node_t *, const char *); 247 static fs_index_t fat_index_get(fs_node_t *); 248 static size_t fat_size_get(fs_node_t *); 249 static unsigned fat_lnkcnt_get(fs_node_t *); 250 static bool fat_has_children(fs_node_t *); 251 static fs_node_t *fat_root_get(dev_handle_t); 252 static char fat_plb_get_char(unsigned); 253 static bool fat_is_directory(fs_node_t *); 254 static bool fat_is_file(fs_node_t *node); 353 255 354 256 /* … … 356 258 */ 357 259 358 int fat_root_get(fs_node_t **rfn, devmap_handle_t devmap_handle)359 {360 return fat_node_get(rfn, devmap_handle, 0);361 }362 363 int fat_match(fs_node_t **rfn, fs_node_t *pfn, const char *component)364 {365 fat_bs_t *bs;366 fat_node_t *parentp = FAT_NODE(pfn);367 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1];368 unsigned i, j;369 unsigned blocks;370 fat_dentry_t *d;371 devmap_handle_t devmap_handle;372 block_t *b;373 int rc;374 375 fibril_mutex_lock(&parentp->idx->lock);376 devmap_handle = parentp->idx->devmap_handle;377 fibril_mutex_unlock(&parentp->idx->lock);378 379 bs = block_bb_get(devmap_handle);380 blocks = parentp->size / BPS(bs);381 for (i = 0; i < blocks; i++) {382 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE);383 if (rc != EOK)384 return rc;385 for (j = 0; j < DPS(bs); j++) {386 d = ((fat_dentry_t *)b->data) + j;387 switch (fat_classify_dentry(d)) {388 case FAT_DENTRY_SKIP:389 case FAT_DENTRY_FREE:390 continue;391 case FAT_DENTRY_LAST:392 /* miss */393 rc = block_put(b);394 *rfn = NULL;395 return rc;396 default:397 case FAT_DENTRY_VALID:398 fat_dentry_name_get(d, name);399 break;400 }401 if (fat_dentry_namecmp(name, component) == 0) {402 /* hit */403 fat_node_t *nodep;404 fat_idx_t *idx = fat_idx_get_by_pos(devmap_handle,405 parentp->firstc, i * DPS(bs) + j);406 if (!idx) {407 /*408 * Can happen if memory is low or if we409 * run out of 32-bit indices.410 */411 rc = block_put(b);412 return (rc == EOK) ? ENOMEM : rc;413 }414 rc = fat_node_get_core(&nodep, idx);415 fibril_mutex_unlock(&idx->lock);416 if (rc != EOK) {417 (void) block_put(b);418 return rc;419 }420 *rfn = FS_NODE(nodep);421 rc = block_put(b);422 if (rc != EOK)423 (void) fat_node_put(*rfn);424 return rc;425 }426 }427 rc = block_put(b);428 if (rc != EOK)429 return rc;430 }431 432 *rfn = NULL;433 return EOK;434 }435 436 260 /** Instantiate a FAT in-core node. */ 437 int fat_node_get(fs_node_t **rfn, devmap_handle_t devmap_handle, fs_index_t index)261 fs_node_t *fat_node_get(dev_handle_t dev_handle, fs_index_t index) 438 262 { 439 263 fat_node_t *nodep; 440 264 fat_idx_t *idxp; 441 int rc; 442 443 idxp = fat_idx_get_by_index(devmap_handle, index); 444 if (!idxp) { 445 *rfn = NULL; 446 return EOK; 447 } 265 266 idxp = fat_idx_get_by_index(dev_handle, index); 267 if (!idxp) 268 return NULL; 448 269 /* idxp->lock held */ 449 rc = fat_node_get_core(&nodep,idxp);270 nodep = fat_node_get_core(idxp); 450 271 fibril_mutex_unlock(&idxp->lock); 451 if (rc == EOK) 452 *rfn = FS_NODE(nodep); 453 return rc; 454 } 455 456 int fat_node_open(fs_node_t *fn) 457 { 458 /* 459 * Opening a file is stateless, nothing 460 * to be done here. 
461 */ 462 return EOK; 463 } 464 465 int fat_node_put(fs_node_t *fn) 272 return FS_NODE(nodep); 273 } 274 275 void fat_node_put(fs_node_t *fn) 466 276 { 467 277 fat_node_t *nodep = FAT_NODE(fn); … … 489 299 free(nodep); 490 300 } 491 return EOK; 492 } 493 494 int fat_create_node(fs_node_t **rfn, devmap_handle_t devmap_handle, int flags) 301 } 302 303 fs_node_t *fat_create_node(dev_handle_t dev_handle, int flags) 495 304 { 496 305 fat_idx_t *idxp; … … 498 307 fat_bs_t *bs; 499 308 fat_cluster_t mcl, lcl; 309 uint16_t bps; 500 310 int rc; 501 311 502 bs = block_bb_get(devmap_handle); 312 bs = block_bb_get(dev_handle); 313 bps = uint16_t_le2host(bs->bps); 503 314 if (flags & L_DIRECTORY) { 504 315 /* allocate a cluster */ 505 rc = fat_alloc_clusters(bs, devmap_handle, 1, &mcl, &lcl); 506 if (rc != EOK) 507 return rc; 508 /* populate the new cluster with unused dentries */ 509 rc = fat_zero_cluster(bs, devmap_handle, mcl); 510 if (rc != EOK) { 511 (void) fat_free_clusters(bs, devmap_handle, mcl); 512 return rc; 513 } 514 } 515 516 rc = fat_node_get_new(&nodep); 517 if (rc != EOK) { 518 (void) fat_free_clusters(bs, devmap_handle, mcl); 519 return rc; 520 } 521 rc = fat_idx_get_new(&idxp, devmap_handle); 522 if (rc != EOK) { 523 (void) fat_free_clusters(bs, devmap_handle, mcl); 524 (void) fat_node_put(FS_NODE(nodep)); 525 return rc; 316 rc = fat_alloc_clusters(bs, dev_handle, 1, &mcl, &lcl); 317 if (rc != EOK) 318 return NULL; 319 } 320 321 nodep = fat_node_get_new(); 322 if (!nodep) { 323 fat_free_clusters(bs, dev_handle, mcl); 324 return NULL; 325 } 326 idxp = fat_idx_get_new(dev_handle); 327 if (!idxp) { 328 fat_free_clusters(bs, dev_handle, mcl); 329 fat_node_put(FS_NODE(nodep)); 330 return NULL; 526 331 } 527 332 /* idxp->lock held */ 528 333 if (flags & L_DIRECTORY) { 334 /* Populate the new cluster with unused dentries. */ 335 fat_zero_cluster(bs, dev_handle, mcl); 529 336 nodep->type = FAT_DIRECTORY; 530 337 nodep->firstc = mcl; 531 nodep->size = BPS(bs) * SPC(bs);338 nodep->size = bps * bs->spc; 532 339 } else { 533 340 nodep->type = FAT_FILE; … … 543 350 544 351 fibril_mutex_unlock(&idxp->lock); 545 *rfn = FS_NODE(nodep); 546 return EOK; 352 return FS_NODE(nodep); 547 353 } 548 354 … … 551 357 fat_node_t *nodep = FAT_NODE(fn); 552 358 fat_bs_t *bs; 553 bool has_children;554 int rc;555 359 556 360 /* … … 565 369 * The node may not have any children. 566 370 */ 567 rc = fat_has_children(&has_children, fn); 568 if (rc != EOK) 569 return rc; 570 assert(!has_children); 571 572 bs = block_bb_get(nodep->idx->devmap_handle); 371 assert(fat_has_children(fn) == false); 372 373 bs = block_bb_get(nodep->idx->dev_handle); 573 374 if (nodep->firstc != FAT_CLST_RES0) { 574 375 assert(nodep->size); 575 376 /* Free all clusters allocated to the node. 
*/ 576 rc = fat_free_clusters(bs, nodep->idx->devmap_handle, 577 nodep->firstc); 377 fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc); 578 378 } 579 379 … … 581 381 free(nodep->bp); 582 382 free(nodep); 583 return rc;383 return EOK; 584 384 } 585 385 … … 592 392 block_t *b; 593 393 unsigned i, j; 394 uint16_t bps; 395 unsigned dps; 594 396 unsigned blocks; 595 397 fat_cluster_t mcl, lcl; … … 620 422 621 423 fibril_mutex_lock(&parentp->idx->lock); 622 bs = block_bb_get(parentp->idx->devmap_handle); 623 624 blocks = parentp->size / BPS(bs); 424 bs = block_bb_get(parentp->idx->dev_handle); 425 bps = uint16_t_le2host(bs->bps); 426 dps = bps / sizeof(fat_dentry_t); 427 428 blocks = parentp->size / bps; 625 429 626 430 for (i = 0; i < blocks; i++) { 627 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE); 628 if (rc != EOK) { 629 fibril_mutex_unlock(&parentp->idx->lock); 630 return rc; 631 } 632 for (j = 0; j < DPS(bs); j++) { 431 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 432 for (j = 0; j < dps; j++) { 633 433 d = ((fat_dentry_t *)b->data) + j; 634 434 switch (fat_classify_dentry(d)) { … … 643 443 } 644 444 } 645 rc = block_put(b); 646 if (rc != EOK) { 647 fibril_mutex_unlock(&parentp->idx->lock); 648 return rc; 649 } 445 block_put(b); 650 446 } 651 447 j = 0; … … 659 455 return ENOSPC; 660 456 } 661 rc = fat_alloc_clusters(bs, parentp->idx->dev map_handle, 1, &mcl, &lcl);457 rc = fat_alloc_clusters(bs, parentp->idx->dev_handle, 1, &mcl, &lcl); 662 458 if (rc != EOK) { 663 459 fibril_mutex_unlock(&parentp->idx->lock); 664 460 return rc; 665 461 } 666 rc = fat_zero_cluster(bs, parentp->idx->devmap_handle, mcl); 667 if (rc != EOK) { 668 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl); 669 fibril_mutex_unlock(&parentp->idx->lock); 670 return rc; 671 } 672 rc = fat_append_clusters(bs, parentp, mcl, lcl); 673 if (rc != EOK) { 674 (void) fat_free_clusters(bs, parentp->idx->devmap_handle, mcl); 675 fibril_mutex_unlock(&parentp->idx->lock); 676 return rc; 677 } 678 parentp->size += BPS(bs) * SPC(bs); 462 fat_zero_cluster(bs, parentp->idx->dev_handle, mcl); 463 fat_append_clusters(bs, parentp, mcl); 464 parentp->size += bps * bs->spc; 679 465 parentp->dirty = true; /* need to sync node */ 680 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE); 681 if (rc != EOK) { 682 fibril_mutex_unlock(&parentp->idx->lock); 683 return rc; 684 } 466 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 685 467 d = (fat_dentry_t *)b->data; 686 468 … … 695 477 fat_dentry_name_set(d, name); 696 478 b->dirty = true; /* need to sync block */ 697 rc =block_put(b);479 block_put(b); 698 480 fibril_mutex_unlock(&parentp->idx->lock); 699 if (rc != EOK)700 return rc;701 481 702 482 fibril_mutex_lock(&childp->idx->lock); 703 483 704 if (childp->type == FAT_DIRECTORY) { 705 /* 706 * If possible, create the Sub-directory Identifier Entry and 707 * the Sub-directory Parent Pointer Entry (i.e. "." and ".."). 708 * These entries are not mandatory according to Standard 709 * ECMA-107 and HelenOS VFS does not use them anyway, so this is 710 * rather a sign of our good will. 711 */ 712 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE); 713 if (rc != EOK) { 714 /* 715 * Rather than returning an error, simply skip the 716 * creation of these two entries. 
717 */ 718 goto skip_dots; 719 } 720 d = (fat_dentry_t *) b->data; 721 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) || 722 (str_cmp((char *) d->name, FAT_NAME_DOT)) == 0) { 723 memset(d, 0, sizeof(fat_dentry_t)); 724 str_cpy((char *) d->name, 8, FAT_NAME_DOT); 725 str_cpy((char *) d->ext, 3, FAT_EXT_PAD); 726 d->attr = FAT_ATTR_SUBDIR; 727 d->firstc = host2uint16_t_le(childp->firstc); 728 /* TODO: initialize also the date/time members. */ 729 } 730 d++; 731 if ((fat_classify_dentry(d) == FAT_DENTRY_LAST) || 732 (str_cmp((char *) d->name, FAT_NAME_DOT_DOT) == 0)) { 733 memset(d, 0, sizeof(fat_dentry_t)); 734 str_cpy((char *) d->name, 8, FAT_NAME_DOT_DOT); 735 str_cpy((char *) d->ext, 3, FAT_EXT_PAD); 736 d->attr = FAT_ATTR_SUBDIR; 737 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ? 738 host2uint16_t_le(FAT_CLST_RES0) : 739 host2uint16_t_le(parentp->firstc); 740 /* TODO: initialize also the date/time members. */ 741 } 742 b->dirty = true; /* need to sync block */ 743 /* 744 * Ignore the return value as we would have fallen through on error 745 * anyway. 746 */ 747 (void) block_put(b); 748 } 749 skip_dots: 484 /* 485 * If possible, create the Sub-directory Identifier Entry and the 486 * Sub-directory Parent Pointer Entry (i.e. "." and ".."). These entries 487 * are not mandatory according to Standard ECMA-107 and HelenOS VFS does 488 * not use them anyway, so this is rather a sign of our good will. 489 */ 490 b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE); 491 d = (fat_dentry_t *)b->data; 492 if (fat_classify_dentry(d) == FAT_DENTRY_LAST || 493 str_cmp(d->name, FAT_NAME_DOT) == 0) { 494 memset(d, 0, sizeof(fat_dentry_t)); 495 str_cpy(d->name, 8, FAT_NAME_DOT); 496 str_cpy(d->ext, 3, FAT_EXT_PAD); 497 d->attr = FAT_ATTR_SUBDIR; 498 d->firstc = host2uint16_t_le(childp->firstc); 499 /* TODO: initialize also the date/time members. */ 500 } 501 d++; 502 if (fat_classify_dentry(d) == FAT_DENTRY_LAST || 503 str_cmp(d->name, FAT_NAME_DOT_DOT) == 0) { 504 memset(d, 0, sizeof(fat_dentry_t)); 505 str_cpy(d->name, 8, FAT_NAME_DOT_DOT); 506 str_cpy(d->ext, 3, FAT_EXT_PAD); 507 d->attr = FAT_ATTR_SUBDIR; 508 d->firstc = (parentp->firstc == FAT_CLST_ROOT) ? 509 host2uint16_t_le(FAT_CLST_RES0) : 510 host2uint16_t_le(parentp->firstc); 511 /* TODO: initialize also the date/time members. 
*/ 512 } 513 b->dirty = true; /* need to sync block */ 514 block_put(b); 750 515 751 516 childp->idx->pfc = parentp->firstc; 752 childp->idx->pdi = i * DPS(bs)+ j;517 childp->idx->pdi = i * dps + j; 753 518 fibril_mutex_unlock(&childp->idx->lock); 754 519 … … 772 537 fat_bs_t *bs; 773 538 fat_dentry_t *d; 539 uint16_t bps; 774 540 block_t *b; 775 bool has_children;776 int rc;777 541 778 542 if (!parentp) 779 543 return EBUSY; 780 544 781 rc = fat_has_children(&has_children, cfn); 782 if (rc != EOK) 783 return rc; 784 if (has_children) 545 if (fat_has_children(cfn)) 785 546 return ENOTEMPTY; 786 547 … … 789 550 assert(childp->lnkcnt == 1); 790 551 fibril_mutex_lock(&childp->idx->lock); 791 bs = block_bb_get(childp->idx->devmap_handle); 792 793 rc = _fat_block_get(&b, bs, childp->idx->devmap_handle, childp->idx->pfc, 794 NULL, (childp->idx->pdi * sizeof(fat_dentry_t)) / BPS(bs), 552 bs = block_bb_get(childp->idx->dev_handle); 553 bps = uint16_t_le2host(bs->bps); 554 555 b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc, 556 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps, 795 557 BLOCK_FLAGS_NONE); 796 if (rc != EOK)797 goto error;798 558 d = (fat_dentry_t *)b->data + 799 (childp->idx->pdi % ( BPS(bs)/ sizeof(fat_dentry_t)));559 (childp->idx->pdi % (bps / sizeof(fat_dentry_t))); 800 560 /* mark the dentry as not-currently-used */ 801 561 d->name[0] = FAT_DENTRY_ERASED; 802 562 b->dirty = true; /* need to sync block */ 803 rc = block_put(b); 804 if (rc != EOK) 805 goto error; 563 block_put(b); 806 564 807 565 /* remove the index structure from the position hash */ … … 812 570 fibril_mutex_unlock(&childp->idx->lock); 813 571 childp->lnkcnt = 0; 814 childp->refcnt++; /* keep the node in memory until destroyed */815 572 childp->dirty = true; 816 573 fibril_mutex_unlock(&childp->lock); … … 818 575 819 576 return EOK; 820 821 error: 822 fibril_mutex_unlock(&parentp->idx->lock); 823 fibril_mutex_unlock(&childp->lock); 824 fibril_mutex_unlock(&childp->idx->lock); 825 return rc; 826 } 827 828 int fat_has_children(bool *has_children, fs_node_t *fn) 577 } 578 579 fs_node_t *fat_match(fs_node_t *pfn, const char *component) 829 580 { 830 581 fat_bs_t *bs; 831 fat_node_t *nodep = FAT_NODE(fn); 582 fat_node_t *parentp = FAT_NODE(pfn); 583 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1]; 584 unsigned i, j; 585 unsigned bps; /* bytes per sector */ 586 unsigned dps; /* dentries per sector */ 832 587 unsigned blocks; 588 fat_dentry_t *d; 833 589 block_t *b; 834 unsigned i, j; 835 int rc; 836 837 if (nodep->type != FAT_DIRECTORY) { 838 *has_children = false; 839 return EOK; 840 } 841 842 fibril_mutex_lock(&nodep->idx->lock); 843 bs = block_bb_get(nodep->idx->devmap_handle); 844 845 blocks = nodep->size / BPS(bs); 846 590 591 fibril_mutex_lock(&parentp->idx->lock); 592 bs = block_bb_get(parentp->idx->dev_handle); 593 bps = uint16_t_le2host(bs->bps); 594 dps = bps / sizeof(fat_dentry_t); 595 blocks = parentp->size / bps; 847 596 for (i = 0; i < blocks; i++) { 848 fat_dentry_t *d; 849 850 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE); 851 if (rc != EOK) { 852 fibril_mutex_unlock(&nodep->idx->lock); 853 return rc; 854 } 855 for (j = 0; j < DPS(bs); j++) { 597 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 598 for (j = 0; j < dps; j++) { 856 599 d = ((fat_dentry_t *)b->data) + j; 857 600 switch (fat_classify_dentry(d)) { … … 860 603 continue; 861 604 case FAT_DENTRY_LAST: 862 rc = block_put(b); 863 fibril_mutex_unlock(&nodep->idx->lock); 864 *has_children = false; 865 return rc; 605 
block_put(b); 606 fibril_mutex_unlock(&parentp->idx->lock); 607 return NULL; 866 608 default: 867 609 case FAT_DENTRY_VALID: 868 rc = block_put(b); 869 fibril_mutex_unlock(&nodep->idx->lock); 870 *has_children = true; 871 return rc; 610 fat_dentry_name_get(d, name); 611 break; 612 } 613 if (fat_dentry_namecmp(name, component) == 0) { 614 /* hit */ 615 fat_node_t *nodep; 616 /* 617 * Assume tree hierarchy for locking. We 618 * already have the parent and now we are going 619 * to lock the child. Never lock in the oposite 620 * order. 621 */ 622 fat_idx_t *idx = fat_idx_get_by_pos( 623 parentp->idx->dev_handle, parentp->firstc, 624 i * dps + j); 625 fibril_mutex_unlock(&parentp->idx->lock); 626 if (!idx) { 627 /* 628 * Can happen if memory is low or if we 629 * run out of 32-bit indices. 630 */ 631 block_put(b); 632 return NULL; 633 } 634 nodep = fat_node_get_core(idx); 635 fibril_mutex_unlock(&idx->lock); 636 block_put(b); 637 return FS_NODE(nodep); 872 638 } 873 639 } 874 rc = block_put(b); 875 if (rc != EOK) { 640 block_put(b); 641 } 642 643 fibril_mutex_unlock(&parentp->idx->lock); 644 return NULL; 645 } 646 647 fs_index_t fat_index_get(fs_node_t *fn) 648 { 649 return FAT_NODE(fn)->idx->index; 650 } 651 652 size_t fat_size_get(fs_node_t *fn) 653 { 654 return FAT_NODE(fn)->size; 655 } 656 657 unsigned fat_lnkcnt_get(fs_node_t *fn) 658 { 659 return FAT_NODE(fn)->lnkcnt; 660 } 661 662 bool fat_has_children(fs_node_t *fn) 663 { 664 fat_bs_t *bs; 665 fat_node_t *nodep = FAT_NODE(fn); 666 unsigned bps; 667 unsigned dps; 668 unsigned blocks; 669 block_t *b; 670 unsigned i, j; 671 672 if (nodep->type != FAT_DIRECTORY) 673 return false; 674 675 fibril_mutex_lock(&nodep->idx->lock); 676 bs = block_bb_get(nodep->idx->dev_handle); 677 bps = uint16_t_le2host(bs->bps); 678 dps = bps / sizeof(fat_dentry_t); 679 680 blocks = nodep->size / bps; 681 682 for (i = 0; i < blocks; i++) { 683 fat_dentry_t *d; 684 685 b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); 686 for (j = 0; j < dps; j++) { 687 d = ((fat_dentry_t *)b->data) + j; 688 switch (fat_classify_dentry(d)) { 689 case FAT_DENTRY_SKIP: 690 case FAT_DENTRY_FREE: 691 continue; 692 case FAT_DENTRY_LAST: 693 block_put(b); 694 fibril_mutex_unlock(&nodep->idx->lock); 695 return false; 696 default: 697 case FAT_DENTRY_VALID: 698 block_put(b); 699 fibril_mutex_unlock(&nodep->idx->lock); 700 return true; 701 } 702 block_put(b); 876 703 fibril_mutex_unlock(&nodep->idx->lock); 877 return rc;704 return true; 878 705 } 706 block_put(b); 879 707 } 880 708 881 709 fibril_mutex_unlock(&nodep->idx->lock); 882 *has_children = false; 883 return EOK; 884 } 885 886 887 fs_index_t fat_index_get(fs_node_t *fn) 888 { 889 return FAT_NODE(fn)->idx->index; 890 } 891 892 aoff64_t fat_size_get(fs_node_t *fn) 893 { 894 return FAT_NODE(fn)->size; 895 } 896 897 unsigned fat_lnkcnt_get(fs_node_t *fn) 898 { 899 return FAT_NODE(fn)->lnkcnt; 710 return false; 711 } 712 713 fs_node_t *fat_root_get(dev_handle_t dev_handle) 714 { 715 return fat_node_get(dev_handle, 0); 900 716 } 901 717 … … 913 729 { 914 730 return FAT_NODE(fn)->type == FAT_FILE; 915 }916 917 devmap_handle_t fat_device_get(fs_node_t *node)918 {919 return 0;920 731 } 921 732 922 733 /** libfs operations */ 923 734 libfs_ops_t fat_libfs_ops = { 924 .root_get = fat_root_get,925 735 .match = fat_match, 926 736 .node_get = fat_node_get, 927 .node_open = fat_node_open,928 737 .node_put = fat_node_put, 929 738 .create = fat_create_node, … … 931 740 .link = fat_link, 932 741 .unlink = fat_unlink, 933 .has_children = 
fat_has_children,934 742 .index_get = fat_index_get, 935 743 .size_get = fat_size_get, 936 744 .lnkcnt_get = fat_lnkcnt_get, 937 .plb_get_char = fat_plb_get_char, 745 .has_children = fat_has_children, 746 .root_get = fat_root_get, 747 .plb_get_char = fat_plb_get_char, 938 748 .is_directory = fat_is_directory, 939 .is_file = fat_is_file, 940 .device_get = fat_device_get 749 .is_file = fat_is_file 941 750 }; 942 751 … … 947 756 void fat_mounted(ipc_callid_t rid, ipc_call_t *request) 948 757 { 949 dev map_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);758 dev_handle_t dev_handle = (dev_handle_t) IPC_GET_ARG1(*request); 950 759 enum cache_mode cmode; 951 760 fat_bs_t *bs; 952 953 /* Accept the mount options */ 954 char *opts; 955 int rc = async_data_write_accept((void **) &opts, true, 0, 0, 0, NULL); 956 957 if (rc != EOK) { 958 async_answer_0(rid, rc); 959 return; 960 } 761 uint16_t bps; 762 uint16_t rde; 763 int rc; 764 765 /* accept the mount options */ 766 ipc_callid_t callid; 767 size_t size; 768 if (!ipc_data_write_receive(&callid, &size)) { 769 ipc_answer_0(callid, EINVAL); 770 ipc_answer_0(rid, EINVAL); 771 return; 772 } 773 char *opts = malloc(size + 1); 774 if (!opts) { 775 ipc_answer_0(callid, ENOMEM); 776 ipc_answer_0(rid, ENOMEM); 777 return; 778 } 779 ipcarg_t retval = ipc_data_write_finalize(callid, opts, size); 780 if (retval != EOK) { 781 ipc_answer_0(rid, retval); 782 free(opts); 783 return; 784 } 785 opts[size] = '\0'; 961 786 962 787 /* Check for option enabling write through. */ … … 966 791 cmode = CACHE_MODE_WB; 967 792 968 free(opts);969 970 793 /* initialize libblock */ 971 rc = block_init(dev map_handle, BS_SIZE);794 rc = block_init(dev_handle, BS_SIZE); 972 795 if (rc != EOK) { 973 async_answer_0(rid, rc);796 ipc_answer_0(rid, rc); 974 797 return; 975 798 } 976 799 977 800 /* prepare the boot block */ 978 rc = block_bb_read(dev map_handle, BS_BLOCK);801 rc = block_bb_read(dev_handle, BS_BLOCK * BS_SIZE, BS_SIZE); 979 802 if (rc != EOK) { 980 block_fini(dev map_handle);981 async_answer_0(rid, rc);803 block_fini(dev_handle); 804 ipc_answer_0(rid, rc); 982 805 return; 983 806 } 984 807 985 808 /* get the buffer with the boot sector */ 986 bs = block_bb_get(devmap_handle); 987 988 if (BPS(bs) != BS_SIZE) { 989 block_fini(devmap_handle); 990 async_answer_0(rid, ENOTSUP); 809 bs = block_bb_get(dev_handle); 810 811 /* Read the number of root directory entries. */ 812 bps = uint16_t_le2host(bs->bps); 813 rde = uint16_t_le2host(bs->root_ent_max); 814 815 if (bps != BS_SIZE) { 816 block_fini(dev_handle); 817 ipc_answer_0(rid, ENOTSUP); 991 818 return; 992 819 } 993 820 994 821 /* Initialize the block cache */ 995 rc = block_cache_init(dev map_handle, BPS(bs), 0 /* XXX */, cmode);822 rc = block_cache_init(dev_handle, bps, 0 /* XXX */, cmode); 996 823 if (rc != EOK) { 997 block_fini(devmap_handle); 998 async_answer_0(rid, rc); 999 return; 1000 } 1001 1002 /* Do some simple sanity checks on the file system. 
*/ 1003 rc = fat_sanity_check(bs, devmap_handle); 824 block_fini(dev_handle); 825 ipc_answer_0(rid, rc); 826 return; 827 } 828 829 rc = fat_idx_init_by_dev_handle(dev_handle); 1004 830 if (rc != EOK) { 1005 (void) block_cache_fini(devmap_handle); 1006 block_fini(devmap_handle); 1007 async_answer_0(rid, rc); 1008 return; 1009 } 1010 1011 rc = fat_idx_init_by_devmap_handle(devmap_handle); 1012 if (rc != EOK) { 1013 (void) block_cache_fini(devmap_handle); 1014 block_fini(devmap_handle); 1015 async_answer_0(rid, rc); 831 block_fini(dev_handle); 832 ipc_answer_0(rid, rc); 1016 833 return; 1017 834 } … … 1020 837 fs_node_t *rfn = (fs_node_t *)malloc(sizeof(fs_node_t)); 1021 838 if (!rfn) { 1022 (void) block_cache_fini(devmap_handle); 1023 block_fini(devmap_handle); 1024 fat_idx_fini_by_devmap_handle(devmap_handle); 1025 async_answer_0(rid, ENOMEM); 839 block_fini(dev_handle); 840 fat_idx_fini_by_dev_handle(dev_handle); 841 ipc_answer_0(rid, ENOMEM); 1026 842 return; 1027 843 } … … 1030 846 if (!rootp) { 1031 847 free(rfn); 1032 (void) block_cache_fini(devmap_handle); 1033 block_fini(devmap_handle); 1034 fat_idx_fini_by_devmap_handle(devmap_handle); 1035 async_answer_0(rid, ENOMEM); 848 block_fini(dev_handle); 849 fat_idx_fini_by_dev_handle(dev_handle); 850 ipc_answer_0(rid, ENOMEM); 1036 851 return; 1037 852 } 1038 853 fat_node_initialize(rootp); 1039 854 1040 fat_idx_t *ridxp = fat_idx_get_by_pos(dev map_handle, FAT_CLST_ROOTPAR, 0);855 fat_idx_t *ridxp = fat_idx_get_by_pos(dev_handle, FAT_CLST_ROOTPAR, 0); 1041 856 if (!ridxp) { 1042 857 free(rfn); 1043 858 free(rootp); 1044 (void) block_cache_fini(devmap_handle); 1045 block_fini(devmap_handle); 1046 fat_idx_fini_by_devmap_handle(devmap_handle); 1047 async_answer_0(rid, ENOMEM); 859 block_fini(dev_handle); 860 fat_idx_fini_by_dev_handle(dev_handle); 861 ipc_answer_0(rid, ENOMEM); 1048 862 return; 1049 863 } … … 1055 869 rootp->refcnt = 1; 1056 870 rootp->lnkcnt = 0; /* FS root is not linked */ 1057 rootp->size = RDE(bs)* sizeof(fat_dentry_t);871 rootp->size = rde * sizeof(fat_dentry_t); 1058 872 rootp->idx = ridxp; 1059 873 ridxp->nodep = rootp; … … 1063 877 fibril_mutex_unlock(&ridxp->lock); 1064 878 1065 async_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt);879 ipc_answer_3(rid, EOK, ridxp->index, rootp->size, rootp->lnkcnt); 1066 880 } 1067 881 … … 1071 885 } 1072 886 1073 void fat_unmounted(ipc_callid_t rid, ipc_call_t *request)1074 {1075 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request);1076 fs_node_t *fn;1077 fat_node_t *nodep;1078 int rc;1079 1080 rc = fat_root_get(&fn, devmap_handle);1081 if (rc != EOK) {1082 async_answer_0(rid, rc);1083 return;1084 }1085 nodep = FAT_NODE(fn);1086 1087 /*1088 * We expect exactly two references on the root node. One for the1089 * fat_root_get() above and one created in fat_mounted().1090 */1091 if (nodep->refcnt != 2) {1092 (void) fat_node_put(fn);1093 async_answer_0(rid, EBUSY);1094 return;1095 }1096 1097 /*1098 * Put the root node and force it to the FAT free node list.1099 */1100 (void) fat_node_put(fn);1101 (void) fat_node_put(fn);1102 1103 /*1104 * Perform cleanup of the node structures, index structures and1105 * associated data. 
Write back this file system's dirty blocks and1106 * stop using libblock for this instance.1107 */1108 (void) fat_node_fini_by_devmap_handle(devmap_handle);1109 fat_idx_fini_by_devmap_handle(devmap_handle);1110 (void) block_cache_fini(devmap_handle);1111 block_fini(devmap_handle);1112 1113 async_answer_0(rid, EOK);1114 }1115 1116 void fat_unmount(ipc_callid_t rid, ipc_call_t *request)1117 {1118 libfs_unmount(&fat_libfs_ops, rid, request);1119 }1120 1121 887 void fat_lookup(ipc_callid_t rid, ipc_call_t *request) 1122 888 { … … 1126 892 void fat_read(ipc_callid_t rid, ipc_call_t *request) 1127 893 { 1128 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request); 1129 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request); 1130 aoff64_t pos = 1131 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request)); 1132 fs_node_t *fn; 894 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); 895 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); 896 off_t pos = (off_t)IPC_GET_ARG3(*request); 897 fs_node_t *fn = fat_node_get(dev_handle, index); 1133 898 fat_node_t *nodep; 1134 899 fat_bs_t *bs; 900 uint16_t bps; 1135 901 size_t bytes; 1136 902 block_t *b; 1137 int rc; 1138 1139 rc = fat_node_get(&fn, devmap_handle, index); 1140 if (rc != EOK) { 1141 async_answer_0(rid, rc); 1142 return; 1143 } 903 1144 904 if (!fn) { 1145 async_answer_0(rid, ENOENT);905 ipc_answer_0(rid, ENOENT); 1146 906 return; 1147 907 } … … 1150 910 ipc_callid_t callid; 1151 911 size_t len; 1152 if (! async_data_read_receive(&callid, &len)) {912 if (!ipc_data_read_receive(&callid, &len)) { 1153 913 fat_node_put(fn); 1154 async_answer_0(callid, EINVAL); 1155 async_answer_0(rid, EINVAL); 1156 return; 1157 } 1158 1159 bs = block_bb_get(devmap_handle); 914 ipc_answer_0(callid, EINVAL); 915 ipc_answer_0(rid, EINVAL); 916 return; 917 } 918 919 bs = block_bb_get(dev_handle); 920 bps = uint16_t_le2host(bs->bps); 1160 921 1161 922 if (nodep->type == FAT_FILE) { … … 1168 929 /* reading beyond the EOF */ 1169 930 bytes = 0; 1170 (void) async_data_read_finalize(callid, NULL, 0);931 (void) ipc_data_read_finalize(callid, NULL, 0); 1171 932 } else { 1172 bytes = min(len, BPS(bs) - pos % BPS(bs));933 bytes = min(len, bps - pos % bps); 1173 934 bytes = min(bytes, nodep->size - pos); 1174 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs),935 b = fat_block_get(bs, nodep, pos / bps, 1175 936 BLOCK_FLAGS_NONE); 1176 if (rc != EOK) { 1177 fat_node_put(fn); 1178 async_answer_0(callid, rc); 1179 async_answer_0(rid, rc); 1180 return; 1181 } 1182 (void) async_data_read_finalize(callid, 1183 b->data + pos % BPS(bs), bytes); 1184 rc = block_put(b); 1185 if (rc != EOK) { 1186 fat_node_put(fn); 1187 async_answer_0(rid, rc); 1188 return; 1189 } 937 (void) ipc_data_read_finalize(callid, b->data + pos % bps, 938 bytes); 939 block_put(b); 1190 940 } 1191 941 } else { 1192 942 unsigned bnum; 1193 aoff64_t spos = pos;943 off_t spos = pos; 1194 944 char name[FAT_NAME_LEN + 1 + FAT_EXT_LEN + 1]; 1195 945 fat_dentry_t *d; 1196 946 1197 947 assert(nodep->type == FAT_DIRECTORY); 1198 assert(nodep->size % BPS(bs)== 0);1199 assert( BPS(bs)% sizeof(fat_dentry_t) == 0);948 assert(nodep->size % bps == 0); 949 assert(bps % sizeof(fat_dentry_t) == 0); 1200 950 1201 951 /* … … 1205 955 * the position pointer accordingly. 
1206 956 */ 1207 bnum = (pos * sizeof(fat_dentry_t)) / BPS(bs); 1208 while (bnum < nodep->size / BPS(bs)) { 1209 aoff64_t o; 1210 1211 rc = fat_block_get(&b, bs, nodep, bnum, 1212 BLOCK_FLAGS_NONE); 1213 if (rc != EOK) 1214 goto err; 1215 for (o = pos % (BPS(bs) / sizeof(fat_dentry_t)); 1216 o < BPS(bs) / sizeof(fat_dentry_t); 957 bnum = (pos * sizeof(fat_dentry_t)) / bps; 958 while (bnum < nodep->size / bps) { 959 off_t o; 960 961 b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE); 962 for (o = pos % (bps / sizeof(fat_dentry_t)); 963 o < bps / sizeof(fat_dentry_t); 1217 964 o++, pos++) { 1218 965 d = ((fat_dentry_t *)b->data) + o; … … 1222 969 continue; 1223 970 case FAT_DENTRY_LAST: 1224 rc = block_put(b); 1225 if (rc != EOK) 1226 goto err; 971 block_put(b); 1227 972 goto miss; 1228 973 default: 1229 974 case FAT_DENTRY_VALID: 1230 975 fat_dentry_name_get(d, name); 1231 rc = block_put(b); 1232 if (rc != EOK) 1233 goto err; 976 block_put(b); 1234 977 goto hit; 1235 978 } 1236 979 } 1237 rc = block_put(b); 1238 if (rc != EOK) 1239 goto err; 980 block_put(b); 1240 981 bnum++; 1241 982 } 1242 983 miss: 1243 rc = fat_node_put(fn); 1244 async_answer_0(callid, rc != EOK ? rc : ENOENT); 1245 async_answer_1(rid, rc != EOK ? rc : ENOENT, 0); 1246 return; 1247 1248 err: 1249 (void) fat_node_put(fn); 1250 async_answer_0(callid, rc); 1251 async_answer_0(rid, rc); 1252 return; 1253 984 fat_node_put(fn); 985 ipc_answer_0(callid, ENOENT); 986 ipc_answer_1(rid, ENOENT, 0); 987 return; 1254 988 hit: 1255 (void) async_data_read_finalize(callid, name, str_size(name) + 1);989 (void) ipc_data_read_finalize(callid, name, str_size(name) + 1); 1256 990 bytes = (pos - spos) + 1; 1257 991 } 1258 992 1259 rc =fat_node_put(fn);1260 async_answer_1(rid, rc, (sysarg_t)bytes);993 fat_node_put(fn); 994 ipc_answer_1(rid, EOK, (ipcarg_t)bytes); 1261 995 } 1262 996 1263 997 void fat_write(ipc_callid_t rid, ipc_call_t *request) 1264 998 { 1265 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request); 1266 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request); 1267 aoff64_t pos = 1268 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request)); 1269 fs_node_t *fn; 999 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); 1000 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); 1001 off_t pos = (off_t)IPC_GET_ARG3(*request); 1002 fs_node_t *fn = fat_node_get(dev_handle, index); 1270 1003 fat_node_t *nodep; 1271 1004 fat_bs_t *bs; 1272 size_t bytes , size;1005 size_t bytes; 1273 1006 block_t *b; 1274 aoff64_t boundary; 1007 uint16_t bps; 1008 unsigned spc; 1009 unsigned bpc; /* bytes per cluster */ 1010 off_t boundary; 1275 1011 int flags = BLOCK_FLAGS_NONE; 1276 int rc; 1277 1278 rc = fat_node_get(&fn, devmap_handle, index); 1279 if (rc != EOK) { 1280 async_answer_0(rid, rc); 1281 return; 1282 } 1012 1283 1013 if (!fn) { 1284 async_answer_0(rid, ENOENT);1014 ipc_answer_0(rid, ENOENT); 1285 1015 return; 1286 1016 } … … 1289 1019 ipc_callid_t callid; 1290 1020 size_t len; 1291 if (!async_data_write_receive(&callid, &len)) { 1292 (void) fat_node_put(fn); 1293 async_answer_0(callid, EINVAL); 1294 async_answer_0(rid, EINVAL); 1295 return; 1296 } 1297 1298 bs = block_bb_get(devmap_handle); 1021 if (!ipc_data_write_receive(&callid, &len)) { 1022 fat_node_put(fn); 1023 ipc_answer_0(callid, EINVAL); 1024 ipc_answer_0(rid, EINVAL); 1025 return; 1026 } 1027 1028 bs = block_bb_get(dev_handle); 1029 bps = uint16_t_le2host(bs->bps); 1030 spc = bs->spc; 1031 bpc = bps * spc; 1299 1032 1300 1033 /* 
… … 1305 1038 * value signalizing a smaller number of bytes written. 1306 1039 */ 1307 bytes = min(len, BPS(bs) - pos % BPS(bs));1308 if (bytes == BPS(bs))1040 bytes = min(len, bps - pos % bps); 1041 if (bytes == bps) 1309 1042 flags |= BLOCK_FLAGS_NOREAD; 1310 1043 1311 boundary = ROUND_UP(nodep->size, BPC(bs));1044 boundary = ROUND_UP(nodep->size, bpc); 1312 1045 if (pos < boundary) { 1313 1046 /* … … 1317 1050 * next block size boundary. 1318 1051 */ 1319 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos); 1320 if (rc != EOK) { 1321 (void) fat_node_put(fn); 1322 async_answer_0(callid, rc); 1323 async_answer_0(rid, rc); 1324 return; 1325 } 1326 rc = fat_block_get(&b, bs, nodep, pos / BPS(bs), flags); 1327 if (rc != EOK) { 1328 (void) fat_node_put(fn); 1329 async_answer_0(callid, rc); 1330 async_answer_0(rid, rc); 1331 return; 1332 } 1333 (void) async_data_write_finalize(callid, 1334 b->data + pos % BPS(bs), bytes); 1052 fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos); 1053 b = fat_block_get(bs, nodep, pos / bps, flags); 1054 (void) ipc_data_write_finalize(callid, b->data + pos % bps, 1055 bytes); 1335 1056 b->dirty = true; /* need to sync block */ 1336 rc = block_put(b); 1337 if (rc != EOK) { 1338 (void) fat_node_put(fn); 1339 async_answer_0(rid, rc); 1340 return; 1341 } 1057 block_put(b); 1342 1058 if (pos + bytes > nodep->size) { 1343 1059 nodep->size = pos + bytes; 1344 1060 nodep->dirty = true; /* need to sync node */ 1345 1061 } 1346 size = nodep->size; 1347 rc = fat_node_put(fn); 1348 async_answer_2(rid, rc, bytes, nodep->size); 1062 ipc_answer_2(rid, EOK, bytes, nodep->size); 1063 fat_node_put(fn); 1349 1064 return; 1350 1065 } else { … … 1353 1068 * clusters for the node and zero them out. 1354 1069 */ 1070 int status; 1355 1071 unsigned nclsts; 1356 1072 fat_cluster_t mcl, lcl; 1357 1073 1358 nclsts = (ROUND_UP(pos + bytes, BPC(bs)) - boundary) / BPC(bs);1074 nclsts = (ROUND_UP(pos + bytes, bpc) - boundary) / bpc; 1359 1075 /* create an independent chain of nclsts clusters in all FATs */ 1360 rc = fat_alloc_clusters(bs, devmap_handle, nclsts, &mcl, &lcl);1361 if ( rc!= EOK) {1076 status = fat_alloc_clusters(bs, dev_handle, nclsts, &mcl, &lcl); 1077 if (status != EOK) { 1362 1078 /* could not allocate a chain of nclsts clusters */ 1363 (void)fat_node_put(fn);1364 async_answer_0(callid, rc);1365 async_answer_0(rid, rc);1079 fat_node_put(fn); 1080 ipc_answer_0(callid, status); 1081 ipc_answer_0(rid, status); 1366 1082 return; 1367 1083 } 1368 1084 /* zero fill any gaps */ 1369 rc = fat_fill_gap(bs, nodep, mcl, pos); 1370 if (rc != EOK) { 1371 (void) fat_free_clusters(bs, devmap_handle, mcl); 1372 (void) fat_node_put(fn); 1373 async_answer_0(callid, rc); 1374 async_answer_0(rid, rc); 1375 return; 1376 } 1377 rc = _fat_block_get(&b, bs, devmap_handle, lcl, NULL, 1378 (pos / BPS(bs)) % SPC(bs), flags); 1379 if (rc != EOK) { 1380 (void) fat_free_clusters(bs, devmap_handle, mcl); 1381 (void) fat_node_put(fn); 1382 async_answer_0(callid, rc); 1383 async_answer_0(rid, rc); 1384 return; 1385 } 1386 (void) async_data_write_finalize(callid, 1387 b->data + pos % BPS(bs), bytes); 1085 fat_fill_gap(bs, nodep, mcl, pos); 1086 b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc, 1087 flags); 1088 (void) ipc_data_write_finalize(callid, b->data + pos % bps, 1089 bytes); 1388 1090 b->dirty = true; /* need to sync block */ 1389 rc = block_put(b); 1390 if (rc != EOK) { 1391 (void) fat_free_clusters(bs, devmap_handle, mcl); 1392 (void) fat_node_put(fn); 1393 async_answer_0(rid, rc); 1394 return; 1395 } 
1091 block_put(b); 1396 1092 /* 1397 1093 * Append the cluster chain starting in mcl to the end of the 1398 1094 * node's cluster chain. 1399 1095 */ 1400 rc = fat_append_clusters(bs, nodep, mcl, lcl); 1401 if (rc != EOK) { 1402 (void) fat_free_clusters(bs, devmap_handle, mcl); 1403 (void) fat_node_put(fn); 1404 async_answer_0(rid, rc); 1405 return; 1406 } 1407 nodep->size = size = pos + bytes; 1096 fat_append_clusters(bs, nodep, mcl); 1097 nodep->size = pos + bytes; 1408 1098 nodep->dirty = true; /* need to sync node */ 1409 rc = fat_node_put(fn);1410 async_answer_2(rid, rc, bytes, size);1099 ipc_answer_2(rid, EOK, bytes, nodep->size); 1100 fat_node_put(fn); 1411 1101 return; 1412 1102 } … … 1415 1105 void fat_truncate(ipc_callid_t rid, ipc_call_t *request) 1416 1106 { 1417 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request); 1418 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request); 1419 aoff64_t size = 1420 (aoff64_t) MERGE_LOUP32(IPC_GET_ARG3(*request), IPC_GET_ARG4(*request)); 1421 fs_node_t *fn; 1107 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); 1108 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); 1109 size_t size = (off_t)IPC_GET_ARG3(*request); 1110 fs_node_t *fn = fat_node_get(dev_handle, index); 1422 1111 fat_node_t *nodep; 1423 1112 fat_bs_t *bs; 1113 uint16_t bps; 1114 uint8_t spc; 1115 unsigned bpc; /* bytes per cluster */ 1424 1116 int rc; 1425 1117 1426 rc = fat_node_get(&fn, devmap_handle, index);1427 if (rc != EOK) {1428 async_answer_0(rid, rc);1429 return;1430 }1431 1118 if (!fn) { 1432 async_answer_0(rid, ENOENT);1119 ipc_answer_0(rid, ENOENT); 1433 1120 return; 1434 1121 } 1435 1122 nodep = FAT_NODE(fn); 1436 1123 1437 bs = block_bb_get(devmap_handle); 1124 bs = block_bb_get(dev_handle); 1125 bps = uint16_t_le2host(bs->bps); 1126 spc = bs->spc; 1127 bpc = bps * spc; 1438 1128 1439 1129 if (nodep->size == size) { … … 1445 1135 */ 1446 1136 rc = EINVAL; 1447 } else if (ROUND_UP(nodep->size, BPC(bs)) == ROUND_UP(size, BPC(bs))) {1137 } else if (ROUND_UP(nodep->size, bpc) == ROUND_UP(size, bpc)) { 1448 1138 /* 1449 1139 * The node will be shrunk, but no clusters will be deallocated. 
… … 1457 1147 */ 1458 1148 if (size == 0) { 1459 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0); 1460 if (rc != EOK) 1461 goto out; 1149 fat_chop_clusters(bs, nodep, FAT_CLST_RES0); 1462 1150 } else { 1463 1151 fat_cluster_t lastc; 1464 rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc, 1465 &lastc, NULL, (size - 1) / BPC(bs)); 1466 if (rc != EOK) 1467 goto out; 1468 rc = fat_chop_clusters(bs, nodep, lastc); 1469 if (rc != EOK) 1470 goto out; 1152 (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, 1153 &lastc, (size - 1) / bpc); 1154 fat_chop_clusters(bs, nodep, lastc); 1471 1155 } 1472 1156 nodep->size = size; … … 1474 1158 rc = EOK; 1475 1159 } 1476 out:1477 1160 fat_node_put(fn); 1478 async_answer_0(rid, rc);1161 ipc_answer_0(rid, rc); 1479 1162 return; 1480 1163 } … … 1482 1165 void fat_close(ipc_callid_t rid, ipc_call_t *request) 1483 1166 { 1484 async_answer_0(rid, EOK);1167 ipc_answer_0(rid, EOK); 1485 1168 } 1486 1169 1487 1170 void fat_destroy(ipc_callid_t rid, ipc_call_t *request) 1488 1171 { 1489 dev map_handle_t devmap_handle = (devmap_handle_t)IPC_GET_ARG1(*request);1172 dev_handle_t dev_handle = (dev_handle_t)IPC_GET_ARG1(*request); 1490 1173 fs_index_t index = (fs_index_t)IPC_GET_ARG2(*request); 1491 fs_node_t *fn;1492 fat_node_t *nodep;1493 1174 int rc; 1494 1175 1495 rc = fat_node_get(&fn, devmap_handle, index); 1496 if (rc != EOK) { 1497 async_answer_0(rid, rc); 1498 return; 1499 } 1176 fs_node_t *fn = fat_node_get(dev_handle, index); 1500 1177 if (!fn) { 1501 async_answer_0(rid, ENOENT); 1502 return; 1503 } 1504 1505 nodep = FAT_NODE(fn); 1506 /* 1507 * We should have exactly two references. One for the above 1508 * call to fat_node_get() and one from fat_unlink(). 1509 */ 1510 assert(nodep->refcnt == 2); 1178 ipc_answer_0(rid, ENOENT); 1179 return; 1180 } 1511 1181 1512 1182 rc = fat_destroy_node(fn); 1513 async_answer_0(rid, rc);1183 ipc_answer_0(rid, rc); 1514 1184 } 1515 1185 … … 1526 1196 void fat_sync(ipc_callid_t rid, ipc_call_t *request) 1527 1197 { 1528 devmap_handle_t devmap_handle = (devmap_handle_t) IPC_GET_ARG1(*request); 1529 fs_index_t index = (fs_index_t) IPC_GET_ARG2(*request); 1530 1531 fs_node_t *fn; 1532 int rc = fat_node_get(&fn, devmap_handle, index); 1533 if (rc != EOK) { 1534 async_answer_0(rid, rc); 1535 return; 1536 } 1537 if (!fn) { 1538 async_answer_0(rid, ENOENT); 1539 return; 1540 } 1541 1542 fat_node_t *nodep = FAT_NODE(fn); 1543 1544 nodep->dirty = true; 1545 rc = fat_node_sync(nodep); 1546 1547 fat_node_put(fn); 1548 async_answer_0(rid, rc); 1198 /* Dummy implementation */ 1199 ipc_answer_0(rid, EOK); 1549 1200 } 1550 1201
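A second pattern visible in the diff is the cleanup of the cached free-node list in fat_node_fini_by_devmap_handle(): while holding the list mutex the code only tries to take each node's lock, and if the trylock fails it drops the list lock and restarts the scan rather than blocking, avoiding deadlock with code paths that take the locks in the opposite order. The sketch below shows the same idea with POSIX threads instead of HelenOS fibril mutexes; all names are hypothetical, and, like the original, it assumes no other users of the nodes remain:

#include <pthread.h>
#include <stdlib.h>

typedef struct cached_node {
        struct cached_node *next;
        pthread_mutex_t lock;
        int instance;                   /* which mounted instance owns this node */
        int dirty;
} cached_node_t;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static cached_node_t *list_head;

static void sync_node(cached_node_t *np)
{
        np->dirty = 0;                  /* stand-in for writing the node back */
}

/* Free every cached node belonging to one instance, trylock/restart style. */
static void fini_cached_nodes(int instance)
{
restart:
        pthread_mutex_lock(&list_lock);
        for (cached_node_t **pp = &list_head; *pp; pp = &(*pp)->next) {
                cached_node_t *np = *pp;

                if (pthread_mutex_trylock(&np->lock) != 0) {
                        /* Someone holds the node lock: back off and rescan
                         * instead of blocking with the list lock held. */
                        pthread_mutex_unlock(&list_lock);
                        goto restart;
                }
                if (np->instance != instance) {
                        pthread_mutex_unlock(&np->lock);
                        continue;       /* not ours, keep scanning */
                }
                *pp = np->next;         /* unlink while the list lock is held */
                pthread_mutex_unlock(&list_lock);
                pthread_mutex_unlock(&np->lock);

                if (np->dirty)
                        sync_node(np);
                pthread_mutex_destroy(&np->lock);
                free(np);

                goto restart;           /* the list changed while unlocked */
        }
        pthread_mutex_unlock(&list_lock);
}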