Changeset 1dff985 in mainline for uspace/srv/vfs/vfs_ops.c
- Timestamp: 2017-03-03T21:32:38Z (8 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: c577a9a
- Parents: 5b46ec8 (diff), b8dbe2f (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
uspace/srv/vfs/vfs_ops.c
r5b46ec8 → r1dff985

FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock);

- vfs_pair_t rootfs = {
-     .fs_handle = 0,
-     .service_id = 0
- };
-
- static int vfs_mount_internal(ipc_callid_t rid, service_id_t service_id,
-     fs_handle_t fs_handle, char *mp, char *opts)
- {
-     vfs_lookup_res_t mp_res;
-     vfs_lookup_res_t mr_res;
-     vfs_node_t *mp_node = NULL;
-     vfs_node_t *mr_node;
-     fs_index_t rindex;
-     aoff64_t rsize;
-     unsigned rlnkcnt;
-     async_exch_t *exch;
-     sysarg_t rc;
-     aid_t msg;
+ vfs_node_t *root = NULL;
+
+ static int vfs_connect_internal(service_id_t service_id, unsigned flags, unsigned instance,
+     char *options, char *fsname, vfs_node_t **root)
+ {
+     fs_handle_t fs_handle = 0;
+
+     fibril_mutex_lock(&fs_list_lock);
+     while (1) {
+         fs_handle = fs_name_to_handle(instance, fsname, false);
+
+         if (fs_handle != 0 || !(flags & IPC_FLAG_BLOCKING)) {
+             break;
+         }
+
+         fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
+     }
+     fibril_mutex_unlock(&fs_list_lock);
+
+     if (fs_handle == 0) {
+         return ENOENT;
+     }
+
+     /* Tell the mountee that it is being mounted. */
      ipc_call_t answer;
+     async_exch_t *exch = vfs_exchange_grab(fs_handle);
+     aid_t msg = async_send_1(exch, VFS_OUT_MOUNTED, (sysarg_t) service_id, &answer);
+     /* Send the mount options */
+     sysarg_t rc = async_data_write_start(exch, options, str_size(options));
+     if (rc != EOK) {
+         async_forget(msg);
+         vfs_exchange_release(exch);
+         return rc;
+     }
+     async_wait_for(msg, &rc);
+     vfs_exchange_release(exch);
+
+     if (rc != EOK) {
+         return rc;
+     }
+
+     vfs_lookup_res_t res;
+     res.triplet.fs_handle = fs_handle;
+     res.triplet.service_id = service_id;
+     res.triplet.index = (fs_index_t) IPC_GET_ARG1(answer);
+     res.size = (int64_t) MERGE_LOUP32(IPC_GET_ARG2(answer), IPC_GET_ARG3(answer));
+     res.type = VFS_NODE_DIRECTORY;
+
+     /* Add reference to the mounted root. */
+     *root = vfs_node_get(&res);
+     assert(*root);
+
+     return EOK;
+ }
+
+ static int vfs_mount_internal(service_id_t service_id, unsigned flags, unsigned instance,
+     char *opts, char *fs_name, char *mp)
+ {
      /* Resolve the path to the mountpoint. */
-     fibril_rwlock_write_lock(&namespace_rwlock);
-     if (rootfs.fs_handle) {
-         /* We already have the root FS. */
-         if (str_cmp(mp, "/") == 0) {
-             /* Trying to mount root FS over root FS */
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             async_answer_0(rid, EBUSY);
-             return EBUSY;
-         }
-
-         rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
-         if (rc != EOK) {
-             /* The lookup failed for some reason. */
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             async_answer_0(rid, rc);
-             return rc;
-         }
-
-         mp_node = vfs_node_get(&mp_res);
-         if (!mp_node) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             async_answer_0(rid, ENOMEM);
-             return ENOMEM;
-         }
-
-         /*
-          * Now we hold a reference to mp_node.
-          * It will be dropped upon the corresponding VFS_IN_UNMOUNT.
-          * This prevents the mount point from being deleted.
-          */
-     } else {
+
+     if (root == NULL) {
          /* We still don't have the root file system mounted. */
-         if (str_cmp(mp, "/") == 0) {
-             /*
-              * For this simple, but important case,
-              * we are almost done.
-              */
-
-             /* Tell the mountee that it is being mounted. */
-             exch = vfs_exchange_grab(fs_handle);
-             msg = async_send_1(exch, VFS_OUT_MOUNTED,
-                 (sysarg_t) service_id, &answer);
-             /* Send the mount options */
-             rc = async_data_write_start(exch, (void *)opts,
-                 str_size(opts));
-             vfs_exchange_release(exch);
-
-             if (rc != EOK) {
-                 async_forget(msg);
-                 fibril_rwlock_write_unlock(&namespace_rwlock);
-                 async_answer_0(rid, rc);
-                 return rc;
-             }
-             async_wait_for(msg, &rc);
-
-             if (rc != EOK) {
-                 fibril_rwlock_write_unlock(&namespace_rwlock);
-                 async_answer_0(rid, rc);
-                 return rc;
-             }
-
-             rindex = (fs_index_t) IPC_GET_ARG1(answer);
-             rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
-                 IPC_GET_ARG3(answer));
-             rlnkcnt = (unsigned) IPC_GET_ARG4(answer);
-
-             mr_res.triplet.fs_handle = fs_handle;
-             mr_res.triplet.service_id = service_id;
-             mr_res.triplet.index = rindex;
-             mr_res.size = rsize;
-             mr_res.lnkcnt = rlnkcnt;
-             mr_res.type = VFS_NODE_DIRECTORY;
-
-             rootfs.fs_handle = fs_handle;
-             rootfs.service_id = service_id;
-
-             /* Add reference to the mounted root. */
-             mr_node = vfs_node_get(&mr_res);
-             assert(mr_node);
-
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             async_answer_0(rid, rc);
-             return rc;
-         } else {
+         if (str_cmp(mp, "/") != 0) {
              /*
               * We can't resolve this without the root filesystem
               * being mounted first.
               */
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             async_answer_0(rid, ENOENT);
              return ENOENT;
          }
-     }
-
-     /*
-      * At this point, we have all necessary pieces: file system handle
-      * and service ID, and we know the mount point VFS node.
-      */
-
-     async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
-     assert(mountee_exch);
-
-     exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
-     msg = async_send_4(exch, VFS_OUT_MOUNT,
-         (sysarg_t) mp_res.triplet.service_id,
-         (sysarg_t) mp_res.triplet.index,
-         (sysarg_t) fs_handle,
-         (sysarg_t) service_id, &answer);
-
-     /* Send connection */
-     rc = async_exchange_clone(exch, mountee_exch);
-     vfs_exchange_release(mountee_exch);
-
-     if (rc != EOK) {
-         vfs_exchange_release(exch);
-         async_forget(msg);
-
-         /* Mount failed, drop reference to mp_node. */
-         if (mp_node)
-             vfs_node_put(mp_node);
-
-         async_answer_0(rid, rc);
-         fibril_rwlock_write_unlock(&namespace_rwlock);
+
+         return vfs_connect_internal(service_id, flags, instance, opts, fs_name, &root);
+     }
+
+     /* We already have the root FS. */
+     if (str_cmp(mp, "/") == 0) {
+         /* Trying to mount root FS over root FS */
+         return EBUSY;
+     }
+
+     vfs_lookup_res_t mp_res;
+     int rc = vfs_lookup_internal(root, mp, L_DIRECTORY, &mp_res);
+     if (rc != EOK) {
+         /* The lookup failed. */
          return rc;
      }

-     /* send the mount options */
-     rc = async_data_write_start(exch, (void *) opts, str_size(opts));
-     if (rc != EOK) {
-         vfs_exchange_release(exch);
-         async_forget(msg);
-
-         /* Mount failed, drop reference to mp_node. */
-         if (mp_node)
-             vfs_node_put(mp_node);
-
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         async_answer_0(rid, rc);
-         return rc;
-     }
-
-     /*
-      * Wait for the answer before releasing the exchange to avoid deadlock
-      * in case the answer depends on further calls to the same file system.
-      * Think of a case when mounting a FS on a file_bd backed by a file on
-      * the same FS.
-      */
-     async_wait_for(msg, &rc);
-     vfs_exchange_release(exch);
-
-     if (rc == EOK) {
-         rindex = (fs_index_t) IPC_GET_ARG1(answer);
-         rsize = (aoff64_t) MERGE_LOUP32(IPC_GET_ARG2(answer),
-             IPC_GET_ARG3(answer));
-         rlnkcnt = (unsigned) IPC_GET_ARG4(answer);
-
-         mr_res.triplet.fs_handle = fs_handle;
-         mr_res.triplet.service_id = service_id;
-         mr_res.triplet.index = rindex;
-         mr_res.size = rsize;
-         mr_res.lnkcnt = rlnkcnt;
-         mr_res.type = VFS_NODE_DIRECTORY;
-
-         /* Add reference to the mounted root. */
-         mr_node = vfs_node_get(&mr_res);
-         assert(mr_node);
-     } else {
-         /* Mount failed, drop reference to mp_node. */
-         if (mp_node)
-             vfs_node_put(mp_node);
-     }
-
-     async_answer_0(rid, rc);
-     fibril_rwlock_write_unlock(&namespace_rwlock);
-     return rc;
+     vfs_node_t *mp_node;
+     mp_node = vfs_node_get(&mp_res);
+     if (!mp_node) {
+         return ENOMEM;
+     }
+
+     if (mp_node->mount != NULL) {
+         return EBUSY;
+     }
+
+     if (mp_node->type != VFS_NODE_DIRECTORY) {
+         printf("%s node not a directory, type=%d\n", mp, mp_node->type);
+         return ENOTDIR;
+     }
+
+     if (vfs_node_has_children(mp_node)) {
+         return ENOTEMPTY;
+     }
+
+     vfs_node_t *mountee;
+
+     rc = vfs_connect_internal(service_id, flags, instance, opts, fs_name, &mountee);
+     if (rc != EOK) {
+         vfs_node_put(mp_node);
+         return ENOMEM;
+     }
+
+     mp_node->mount = mountee;
+     /* The two references to nodes are held by the mount so that they cannot be freed.
+      * They are removed in detach_internal().
+      */
+     return EOK;
  }

  void vfs_mount_srv(ipc_callid_t rid, ipc_call_t *request)
  {
-     service_id_t service_id;
-
      /*
       * We expect the library to do the device-name to device-handle
…
       * in the request.
       */
-     service_id = (service_id_t) IPC_GET_ARG1(*request);
+     service_id_t service_id = (service_id_t) IPC_GET_ARG1(*request);

      /*
…
          0, NULL);
      if (rc != EOK) {
+         async_answer_0(rid, rc);
          free(mp);
-         async_answer_0(rid, rc);
          return;
      }
…
          FS_NAME_MAXLEN, 0, NULL);
      if (rc != EOK) {
+         async_answer_0(rid, rc);
          free(mp);
          free(opts);
-         async_answer_0(rid, rc);
-         return;
-     }
-
-     /*
-      * Wait for VFS_IN_PING so that we can return an error if we don't know
-      * fs_name.
-      */
-     ipc_call_t data;
-     ipc_callid_t callid = async_get_call(&data);
-     if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
-         async_answer_0(callid, ENOTSUP);
-         async_answer_0(rid, ENOTSUP);
-         free(mp);
-         free(opts);
-         free(fs_name);
-         return;
-     }
-
-     /*
-      * Check if we know a file system with the same name as is in fs_name.
-      * This will also give us its file system handle.
-      */
-     fibril_mutex_lock(&fs_list_lock);
-     fs_handle_t fs_handle;
- recheck:
-     fs_handle = fs_name_to_handle(instance, fs_name, false);
-     if (!fs_handle) {
-         if (flags & IPC_FLAG_BLOCKING) {
-             fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
-             goto recheck;
-         }
-
-         fibril_mutex_unlock(&fs_list_lock);
-         async_answer_0(callid, ENOENT);
-         async_answer_0(rid, ENOENT);
-         free(mp);
-         free(fs_name);
-         free(opts);
-         return;
-     }
-     fibril_mutex_unlock(&fs_list_lock);
-
+         return;
+     }
+
      /* Add the filesystem info to the list of mounted filesystems */
      mtab_ent_t *mtab_ent = malloc(sizeof(mtab_ent_t));
      if (!mtab_ent) {
-         async_answer_0(callid, ENOMEM);
          async_answer_0(rid, ENOMEM);
          free(mp);
…
          return;
      }
-
-     /* Do the mount */
-     rc = vfs_mount_internal(rid, service_id, fs_handle, mp, opts);
-     if (rc != EOK) {
-         async_answer_0(callid, ENOTSUP);
-         async_answer_0(rid, ENOTSUP);
-         free(mtab_ent);
-         free(mp);
-         free(opts);
-         free(fs_name);
-         return;
-     }
+
+     /* Mount the filesystem. */
+     fibril_rwlock_write_lock(&namespace_rwlock);
+     rc = vfs_mount_internal(service_id, flags, instance, opts, fs_name, mp);
+     fibril_rwlock_write_unlock(&namespace_rwlock);

      /* Add the filesystem info to the list of mounted filesystems */
-
-     str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
-     str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
-     str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
-     mtab_ent->instance = instance;
-     mtab_ent->service_id = service_id;
-
-     link_initialize(&mtab_ent->link);
-
-     fibril_mutex_lock(&mtab_list_lock);
-     list_append(&mtab_ent->link, &mtab_list);
-     mtab_size++;
-     fibril_mutex_unlock(&mtab_list_lock);
+     if (rc == EOK) {
+         str_cpy(mtab_ent->mp, MAX_PATH_LEN, mp);
+         str_cpy(mtab_ent->fs_name, FS_NAME_MAXLEN, fs_name);
+         str_cpy(mtab_ent->opts, MAX_MNTOPTS_LEN, opts);
+         mtab_ent->instance = instance;
+         mtab_ent->service_id = service_id;
+
+         link_initialize(&mtab_ent->link);
+
+         fibril_mutex_lock(&mtab_list_lock);
+         list_append(&mtab_ent->link, &mtab_list);
+         mtab_size++;
+         fibril_mutex_unlock(&mtab_list_lock);
+     }
+
+     async_answer_0(rid, rc);

      free(mp);
      free(fs_name);
      free(opts);
-
-     /* Acknowledge that we know fs_name. */
-     async_answer_0(callid, EOK);
  }

  void vfs_unmount_srv(ipc_callid_t rid, ipc_call_t *request)
  {
-     int rc;
+     /*
+      * Receive the mount point path.
+      */
      char *mp;
-     vfs_lookup_res_t mp_res;
-     vfs_lookup_res_t mr_res;
-     vfs_node_t *mr_node;
-     async_exch_t *exch;
-
-     /*
-      * Receive the mount point path.
-      */
-     rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
+     int rc = async_data_write_accept((void **) &mp, true, 0, MAX_PATH_LEN,
          0, NULL);
      if (rc != EOK)
…
       */
      fibril_rwlock_write_lock(&namespace_rwlock);
-
-     /*
-      * Lookup the mounted root and instantiate it.
-      */
-     rc = vfs_lookup_internal(mp, L_ROOT, &mr_res, NULL);
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
+
+     if (str_cmp(mp, "/") == 0) {
          free(mp);
-         async_answer_0(rid, rc);
-         return;
-     }
-     mr_node = vfs_node_get(&mr_res);
-     if (!mr_node) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         free(mp);
-         async_answer_0(rid, ENOMEM);
-         return;
-     }
-
-     /*
-      * Count the total number of references for the mounted file system. We
-      * are expecting at least two. One which we got above and one which we
-      * got when the file system was mounted. If we find more, it means that
-      * the file system cannot be gracefully unmounted at the moment because
-      * someone is working with it.
-      */
-     if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
-         mr_node->service_id) != 2) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(mr_node);
-         free(mp);
-         async_answer_0(rid, EBUSY);
-         return;
-     }
-
-     if (str_cmp(mp, "/") == 0) {

          /*
…
           */

-         exch = vfs_exchange_grab(mr_node->fs_handle);
-         rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED,
-             mr_node->service_id);
+         if (!root) {
+             fibril_rwlock_write_unlock(&namespace_rwlock);
+             async_answer_0(rid, ENOENT);
+             return;
+         }
+
+         /*
+          * Count the total number of references for the mounted file system. We
+          * are expecting at least one, which we got when the file system was mounted.
+          * If we find more, it means that
+          * the file system cannot be gracefully unmounted at the moment because
+          * someone is working with it.
+          */
+         if (vfs_nodes_refcount_sum_get(root->fs_handle, root->service_id) != 1) {
+             fibril_rwlock_write_unlock(&namespace_rwlock);
+             async_answer_0(rid, EBUSY);
+             return;
+         }
+
+         async_exch_t *exch = vfs_exchange_grab(root->fs_handle);
+         rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED, root->service_id);
          vfs_exchange_release(exch);

-         if (rc != EOK) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             free(mp);
-             vfs_node_put(mr_node);
-             async_answer_0(rid, rc);
-             return;
-         }
-
-         rootfs.fs_handle = 0;
-         rootfs.service_id = 0;
-     } else {
-
-         /*
-          * Unmounting a non-root file system.
-          *
-          * We have a regular mount point node representing the parent
-          * file system, so we delegate the operation to it.
-          */
-
-         rc = vfs_lookup_internal(mp, L_MP, &mp_res, NULL);
-         if (rc != EOK) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             free(mp);
-             vfs_node_put(mr_node);
-             async_answer_0(rid, rc);
-             return;
-         }
-
-         vfs_node_t *mp_node = vfs_node_get(&mp_res);
-         if (!mp_node) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             free(mp);
-             vfs_node_put(mr_node);
-             async_answer_0(rid, ENOMEM);
-             return;
-         }
-
-         exch = vfs_exchange_grab(mp_node->fs_handle);
-         rc = async_req_2_0(exch, VFS_OUT_UNMOUNT,
-             mp_node->service_id, mp_node->index);
-         vfs_exchange_release(exch);
-
-         if (rc != EOK) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             free(mp);
-             vfs_node_put(mp_node);
-             vfs_node_put(mr_node);
-             async_answer_0(rid, rc);
-             return;
-         }
-
-         /* Drop the reference we got above. */
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         if (rc == EOK) {
+             vfs_node_forget(root);
+             root = NULL;
+         }
+         async_answer_0(rid, rc);
+         return;
+     }
+
+     /*
+      * Lookup the mounted root and instantiate it.
+      */
+     vfs_lookup_res_t mp_res;
+     rc = vfs_lookup_internal(root, mp, L_MP, &mp_res);
+     if (rc != EOK) {
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         free(mp);
+         async_answer_0(rid, rc);
+         return;
+     }
+     vfs_node_t *mp_node = vfs_node_get(&mp_res);
+     if (!mp_node) {
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         free(mp);
+         async_answer_0(rid, ENOMEM);
+         return;
+     }
+
+     if (mp_node->mount == NULL) {
+         fibril_rwlock_write_unlock(&namespace_rwlock);
          vfs_node_put(mp_node);
-         /* Drop the reference from when the file system was mounted. */
+         free(mp);
+         async_answer_0(rid, ENOENT);
+         return;
+     }
+
+     /*
+      * Count the total number of references for the mounted file system. We
+      * are expecting at least one, which we got when the file system was mounted.
+      * If we find more, it means that
+      * the file system cannot be gracefully unmounted at the moment because
+      * someone is working with it.
+      */
+     if (vfs_nodes_refcount_sum_get(mp_node->mount->fs_handle, mp_node->mount->service_id) != 1) {
+         fibril_rwlock_write_unlock(&namespace_rwlock);
          vfs_node_put(mp_node);
-     }
-
-     /*
-      * All went well, the mounted file system was successfully unmounted.
-      * The only thing left is to forget the unmounted root VFS node.
-      */
-     vfs_node_forget(mr_node);
+         free(mp);
+         async_answer_0(rid, EBUSY);
+         return;
+     }
+
+     /* Unmount the filesystem. */
+     async_exch_t *exch = vfs_exchange_grab(mp_node->mount->fs_handle);
+     rc = async_req_1_0(exch, VFS_OUT_UNMOUNTED, mp_node->mount->service_id);
+     vfs_exchange_release(exch);
+
+     vfs_node_forget(mp_node->mount);
+     mp_node->mount = NULL;
+
+     vfs_node_put(mp_node);
      fibril_rwlock_write_unlock(&namespace_rwlock);

      fibril_mutex_lock(&mtab_list_lock);
-
      int found = 0;

…
      fibril_mutex_unlock(&mtab_list_lock);

-     free(mp);
-
+     free(mp);
+
      async_answer_0(rid, EOK);
-  }
-
- void vfs_open(ipc_callid_t rid, ipc_call_t *request)
- {
-     /*
-      * The POSIX interface is open(path, oflag, mode).
-      * We can receive oflags and mode along with the VFS_IN_OPEN call;
-      * the path will need to arrive in another call.
-      *
-      * We also receive one private, non-POSIX set of flags called lflag
-      * used to pass information to vfs_lookup_internal().
-      */
-     int lflag = IPC_GET_ARG1(*request);
-     int oflag = IPC_GET_ARG2(*request);
-     int mode = IPC_GET_ARG3(*request);
-
-     /* Ignore mode for now. */
-     (void) mode;
-
-     /*
-      * Make sure that we are called with exactly one of L_FILE and
-      * L_DIRECTORY. Make sure that the user does not pass L_OPEN,
-      * L_ROOT or L_MP.
-      */
-     if (((lflag & (L_FILE | L_DIRECTORY)) == 0) ||
-         ((lflag & (L_FILE | L_DIRECTORY)) == (L_FILE | L_DIRECTORY)) ||
-         (lflag & (L_OPEN | L_ROOT | L_MP))) {
+     return;
+ }
+
+ static inline bool walk_flags_valid(int flags)
+ {
+     if ((flags&~WALK_ALL_FLAGS) != 0) {
+         return false;
+     }
+     if ((flags&WALK_MAY_CREATE) && (flags&WALK_MUST_CREATE)) {
+         return false;
+     }
+     if ((flags&WALK_REGULAR) && (flags&WALK_DIRECTORY)) {
+         return false;
+     }
+     if ((flags&WALK_MAY_CREATE) || (flags&WALK_MUST_CREATE)) {
+         if (!(flags&WALK_DIRECTORY) && !(flags&WALK_REGULAR)) {
+             return false;
+         }
+     }
+     return true;
+ }
+
+ static inline int walk_lookup_flags(int flags)
+ {
+     int lflags = 0;
+     if (flags&WALK_MAY_CREATE || flags&WALK_MUST_CREATE) {
+         lflags |= L_CREATE;
+     }
+     if (flags&WALK_MUST_CREATE) {
+         lflags |= L_EXCLUSIVE;
+     }
+     if (flags&WALK_REGULAR) {
+         lflags |= L_FILE;
+     }
+     if (flags&WALK_DIRECTORY) {
+         lflags |= L_DIRECTORY;
+     }
+     return lflags;
+ }
+
+ void vfs_walk(ipc_callid_t rid, ipc_call_t *request)
+ {
+     /*
+      * Parent is our relative root for file lookup.
+      * For defined flags, see <ipc/vfs.h>.
+      */
+     int parentfd = IPC_GET_ARG1(*request);
+     int flags = IPC_GET_ARG2(*request);
+
+     if (!walk_flags_valid(flags)) {
          async_answer_0(rid, EINVAL);
          return;
      }

-     if (oflag & O_CREAT)
-         lflag |= L_CREATE;
-     if (oflag & O_EXCL)
-         lflag |= L_EXCLUSIVE;
-
      char *path;
-     int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
-     if (rc != EOK) {
+     int rc = async_data_write_accept((void **)&path, true, 0, 0, 0, NULL);
+
+     /* Lookup the file structure corresponding to the file descriptor. */
+     vfs_file_t *parent = NULL;
+     vfs_node_t *parent_node = root;
+     // TODO: Client-side root.
+     if (parentfd != -1) {
+         parent = vfs_file_get(parentfd);
+         if (!parent) {
+             free(path);
+             async_answer_0(rid, EBADF);
+             return;
+         }
+         parent_node = parent->node;
+     }
+
+     fibril_rwlock_read_lock(&namespace_rwlock);
+
+     vfs_lookup_res_t lr;
+     rc = vfs_lookup_internal(parent_node, path, walk_lookup_flags(flags), &lr);
+     free(path);
+
+     if (rc != EOK) {
+         fibril_rwlock_read_unlock(&namespace_rwlock);
+         if (parent) {
+             vfs_file_put(parent);
+         }
          async_answer_0(rid, rc);
          return;
      }

-     /*
-      * Avoid the race condition in which the file can be deleted before we
-      * find/create-and-lock the VFS node corresponding to the looked-up
-      * triplet.
-      */
-     if (lflag & L_CREATE)
-         fibril_rwlock_write_lock(&namespace_rwlock);
-     else
-         fibril_rwlock_read_lock(&namespace_rwlock);
-
-     /* The path is now populated and we can call vfs_lookup_internal(). */
-     vfs_lookup_res_t lr;
-     rc = vfs_lookup_internal(path, lflag | L_OPEN, &lr, NULL);
-     if (rc != EOK) {
-         if (lflag & L_CREATE)
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-         else
-             fibril_rwlock_read_unlock(&namespace_rwlock);
-         async_answer_0(rid, rc);
-         free(path);
-         return;
-     }
-
-     /* Path is no longer needed. */
-     free(path);
-
      vfs_node_t *node = vfs_node_get(&lr);
-     if (lflag & L_CREATE)
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-     else
-         fibril_rwlock_read_unlock(&namespace_rwlock);
-
-     if (!node) {
-         async_answer_0(rid, ENOMEM);
-         return;
-     }
-
-     /* Truncate the file if requested and if necessary. */
-     if (oflag & O_TRUNC) {
-         fibril_rwlock_write_lock(&node->contents_rwlock);
-         if (node->size) {
-             rc = vfs_truncate_internal(node->fs_handle,
-                 node->service_id, node->index, 0);
-             if (rc) {
-                 fibril_rwlock_write_unlock(&node->contents_rwlock);
-                 vfs_node_put(node);
-                 async_answer_0(rid, rc);
-                 return;
-             }
-             node->size = 0;
-         }
-         fibril_rwlock_write_unlock(&node->contents_rwlock);
-     }
-
-     /*
-      * Get ourselves a file descriptor and the corresponding vfs_file_t
-      * structure.
-      */
-     int fd = vfs_fd_alloc((oflag & O_DESC) != 0);
+
+     int fd = vfs_fd_alloc(false);
      if (fd < 0) {
          vfs_node_put(node);
+         if (parent) {
+             vfs_file_put(parent);
+         }
          async_answer_0(rid, fd);
          return;
      }
+
      vfs_file_t *file = vfs_file_get(fd);
-     assert(file);
+     assert(file != NULL);
+
      file->node = node;
-     if (oflag & O_APPEND)
-         file->append = true;
-
-     /*
-      * The following increase in reference count is for the fact that the
-      * file is being opened and that a file structure is pointing to it.
-      * It is necessary so that the file will not disappear when
-      * vfs_node_put() is called. The reference will be dropped by the
-      * respective VFS_IN_CLOSE.
-      */
-     vfs_node_addref(node);
-     vfs_node_put(node);
+     if (parent) {
+         file->permissions = parent->permissions;
+     } else {
+         file->permissions = MODE_READ | MODE_WRITE | MODE_APPEND;
+     }
+     file->open_read = false;
+     file->open_write = false;
+
      vfs_file_put(file);
-
-     /* Success! Return the new file descriptor to the client. */
+     if (parent) {
+         vfs_file_put(parent);
+     }
+
+     fibril_rwlock_read_unlock(&namespace_rwlock);
+
      async_answer_1(rid, EOK, fd);
+ }
+
+ void vfs_open2(ipc_callid_t rid, ipc_call_t *request)
+ {
+     int fd = IPC_GET_ARG1(*request);
+     int flags = IPC_GET_ARG2(*request);
+
+     if (flags == 0) {
+         async_answer_0(rid, EINVAL);
+         return;
+     }
+
+     vfs_file_t *file = vfs_file_get(fd);
+     if (!file) {
+         async_answer_0(rid, EBADF);
+         return;
+     }
+
+     if ((flags & ~file->permissions) != 0) {
+         vfs_file_put(file);
+         async_answer_0(rid, EPERM);
+         return;
+     }
+
+     file->open_read = (flags & MODE_READ) != 0;
+     file->open_write = (flags & (MODE_WRITE | MODE_APPEND)) != 0;
+     file->append = (flags & MODE_APPEND) != 0;
+
+     if (!file->open_read && !file->open_write) {
+         vfs_file_put(file);
+         async_answer_0(rid, EINVAL);
+         return;
+     }
+
+     if (file->node->type == VFS_NODE_DIRECTORY && file->open_write) {
+         file->open_read = file->open_write = false;
+         vfs_file_put(file);
+         async_answer_0(rid, EINVAL);
+         return;
+     }
+
+     int rc = vfs_open_node_remote(file->node);
+     if (rc != EOK) {
+         file->open_read = file->open_write = false;
+         vfs_file_put(file);
+         async_answer_0(rid, rc);
+         return;
+     }
+
+     vfs_file_put(file);
+     async_answer_0(rid, EOK);
  }
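The hunk above replaces the single-shot VFS_IN_OPEN handler with a two-step protocol: vfs_walk() resolves a path relative to a parent file descriptor into a new descriptor, and vfs_open2() later enables read/write access on it. The client-supplied WALK_* flags are validated by walk_flags_valid() and translated into internal lookup flags by walk_lookup_flags(). A minimal standalone sketch of that translation is shown below; the mapping logic mirrors walk_lookup_flags() from the diff, but the numeric values of the WALK_* and L_* constants are invented for the example (the real definitions live in the HelenOS headers and are not part of this changeset view).

/* Standalone sketch of the WALK_* -> L_* translation added above.
 * Flag values below are illustrative only; the mapping is the point. */
#include <stdio.h>

enum {
    WALK_MAY_CREATE  = 1 << 0,
    WALK_MUST_CREATE = 1 << 1,
    WALK_REGULAR     = 1 << 2,
    WALK_DIRECTORY   = 1 << 3,
};

enum {
    L_CREATE    = 1 << 0,
    L_EXCLUSIVE = 1 << 1,
    L_FILE      = 1 << 2,
    L_DIRECTORY = 1 << 3,
};

static int walk_lookup_flags(int flags)
{
    int lflags = 0;
    if ((flags & WALK_MAY_CREATE) || (flags & WALK_MUST_CREATE))
        lflags |= L_CREATE;
    if (flags & WALK_MUST_CREATE)
        lflags |= L_EXCLUSIVE;
    if (flags & WALK_REGULAR)
        lflags |= L_FILE;
    if (flags & WALK_DIRECTORY)
        lflags |= L_DIRECTORY;
    return lflags;
}

int main(void)
{
    /* WALK_MUST_CREATE | WALK_REGULAR maps to L_CREATE | L_EXCLUSIVE | L_FILE,
     * i.e. "create a new regular file, fail if it already exists". */
    printf("%#x\n", walk_lookup_flags(WALK_MUST_CREATE | WALK_REGULAR));
    return 0;
}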
…
      fibril_mutex_lock(&file->lock);

+     if ((read && !file->open_read) || (!read && !file->open_write)) {
+         fibril_mutex_unlock(&file->lock);
+         return EINVAL;
+     }
+
      vfs_info_t *fs_info = fs_handle_to_info(file->node->fs_handle);
      assert(fs_info);
…
      size_t bytes = IPC_GET_ARG1(answer);

-     if (file->node->type == VFS_NODE_DIRECTORY)
+     if (file->node->type == VFS_NODE_DIRECTORY) {
          fibril_rwlock_read_unlock(&namespace_rwlock);
+     }

      /* Unlock the VFS node. */
…
      case SEEK_END:
          fibril_rwlock_read_lock(&file->node->contents_rwlock);
-         aoff64_t size = file->node->size;
+         aoff64_t size = vfs_node_get_size(file->node);

          if ((off >= 0) && (size + off < size)) {
…
  }

- void vfs_stat(ipc_callid_t rid, ipc_call_t *request)
- {
+ static void out_destroy(vfs_triplet_t *file)
+ {
+     async_exch_t *exch = vfs_exchange_grab(file->fs_handle);
+     async_msg_2(exch, VFS_OUT_DESTROY,
+         (sysarg_t) file->service_id, (sysarg_t) file->index);
+     vfs_exchange_release(exch);
+ }
+
+ void vfs_unlink2(ipc_callid_t rid, ipc_call_t *request)
+ {
+     int rc;
      char *path;
-     int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
+     vfs_file_t *parent = NULL;
+     vfs_file_t *expect = NULL;
+     vfs_node_t *parent_node = root;
+
+     int parentfd = IPC_GET_ARG1(*request);
+     int expectfd = IPC_GET_ARG2(*request);
+     int wflag = IPC_GET_ARG3(*request);
+
+     rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
      if (rc != EOK) {
          async_answer_0(rid, rc);
…
      }

-     ipc_callid_t callid;
-     if (!async_data_read_receive(&callid, NULL)) {
+     fibril_rwlock_write_lock(&namespace_rwlock);
+
+     int lflag = (wflag&WALK_DIRECTORY) ? L_DIRECTORY: 0;
+
+     if (parentfd >= 0) {
+         parent = vfs_file_get(parentfd);
+         if (!parent) {
+             rc = ENOENT;
+             goto exit;
+         }
+         parent_node = parent->node;
+     }
+
+     if (expectfd >= 0) {
+         expect = vfs_file_get(expectfd);
+         if (!expect) {
+             rc = ENOENT;
+             goto exit;
+         }
+
+         vfs_lookup_res_t lr;
+         rc = vfs_lookup_internal(parent_node, path, lflag, &lr);
+         if (rc != EOK) {
+             goto exit;
+         }
+
+         if (__builtin_memcmp(&lr.triplet, expect->node, sizeof(vfs_triplet_t)) != 0) {
+             rc = ENOENT;
+             goto exit;
+         }
+
+         vfs_file_put(expect);
+         expect = NULL;
+     }
+
+     vfs_lookup_res_t lr;
+     rc = vfs_lookup_internal(parent_node, path, lflag | L_UNLINK, &lr);
+     if (rc != EOK) {
+         goto exit;
+     }
+
+     /* If the node is not held by anyone, try to destroy it. */
+     if (vfs_node_peek(&lr) == NULL) {
+         out_destroy(&lr.triplet);
+     }
+
+ exit:
+     if (path) {
          free(path);
-         async_answer_0(callid, EINVAL);
-         async_answer_0(rid, EINVAL);
-         return;
-     }
-
-     vfs_lookup_res_t lr;
-     fibril_rwlock_read_lock(&namespace_rwlock);
-     rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
-     free(path);
-     if (rc != EOK) {
-         fibril_rwlock_read_unlock(&namespace_rwlock);
-         async_answer_0(callid, rc);
-         async_answer_0(rid, rc);
-         return;
-     }
-     vfs_node_t *node = vfs_node_get(&lr);
-     if (!node) {
-         fibril_rwlock_read_unlock(&namespace_rwlock);
-         async_answer_0(callid, ENOMEM);
-         async_answer_0(rid, ENOMEM);
-         return;
-     }
-
-     fibril_rwlock_read_unlock(&namespace_rwlock);
-
-     async_exch_t *exch = vfs_exchange_grab(node->fs_handle);
-
-     aid_t msg;
-     msg = async_send_3(exch, VFS_OUT_STAT, node->service_id,
-         node->index, false, NULL);
-     async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);
-
-     vfs_exchange_release(exch);
-
-     sysarg_t rv;
-     async_wait_for(msg, &rv);
-
-     async_answer_0(rid, rv);
-
-     vfs_node_put(node);
- }
-
- void vfs_mkdir(ipc_callid_t rid, ipc_call_t *request)
- {
-     int mode = IPC_GET_ARG1(*request);
-
-     char *path;
-     int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
-     if (rc != EOK) {
-         async_answer_0(rid, rc);
-         return;
-     }
-
-     /* Ignore mode for now. */
-     (void) mode;
+     }
+     if (parent) {
+         vfs_file_put(parent);
+     }
+     if (expect) {
+         vfs_file_put(expect);
+     }
+     fibril_rwlock_write_unlock(&namespace_rwlock);
+     async_answer_0(rid, rc);
+ }
+
+ static size_t shared_path(char *a, char *b)
+ {
+     size_t res = 0;
+
+     while (a[res] == b[res] && a[res] != 0) {
+         res++;
+     }
+
+     if (a[res] == b[res]) {
+         return res;
+     }
+
+     res--;
+     while (a[res] != '/') {
+         res--;
+     }
+     return res;
+ }
+
+ static int vfs_rename_internal(vfs_node_t *base, char *old, char *new)
+ {
+     assert(base != NULL);
+     assert(old != NULL);
+     assert(new != NULL);
+
+     vfs_lookup_res_t base_lr;
+     vfs_lookup_res_t old_lr;
+     vfs_lookup_res_t new_lr_orig;
+     bool orig_unlinked = false;
+
+     int rc;
+
+     size_t shared = shared_path(old, new);
+
+     /* Do not allow one path to be a prefix of the other. */
+     if (old[shared] == 0 || new[shared] == 0) {
+         return EINVAL;
+     }
+     assert(old[shared] == '/');
+     assert(new[shared] == '/');

      fibril_rwlock_write_lock(&namespace_rwlock);
-     int lflag = L_DIRECTORY | L_CREATE | L_EXCLUSIVE;
-     rc = vfs_lookup_internal(path, lflag, NULL, NULL);
+
+     /* Resolve the shared portion of the path first. */
+     if (shared != 0) {
+         old[shared] = 0;
+         rc = vfs_lookup_internal(base, old, L_DIRECTORY, &base_lr);
+         if (rc != EOK) {
+             fibril_rwlock_write_unlock(&namespace_rwlock);
+             return rc;
+         }
+
+         base = vfs_node_get(&base_lr);
+         old[shared] = '/';
+         old += shared;
+         new += shared;
+     } else {
+         vfs_node_addref(base);
+     }
+
+
+     rc = vfs_lookup_internal(base, new, L_UNLINK | L_DISABLE_MOUNTS, &new_lr_orig);
+     if (rc == EOK) {
+         orig_unlinked = true;
+     } else if (rc != ENOENT) {
+         vfs_node_put(base);
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         return rc;
+     }
+
+     rc = vfs_lookup_internal(base, old, L_UNLINK | L_DISABLE_MOUNTS, &old_lr);
+     if (rc != EOK) {
+         if (orig_unlinked) {
+             vfs_link_internal(base, new, &new_lr_orig.triplet);
+         }
+         vfs_node_put(base);
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         return rc;
+     }
+
+     rc = vfs_link_internal(base, new, &old_lr.triplet);
+     if (rc != EOK) {
+         vfs_link_internal(base, old, &old_lr.triplet);
+         if (orig_unlinked) {
+             vfs_link_internal(base, new, &new_lr_orig.triplet);
+         }
+         vfs_node_put(base);
+         fibril_rwlock_write_unlock(&namespace_rwlock);
+         return rc;
+     }
+
+     /* If the node is not held by anyone, try to destroy it. */
+     if (orig_unlinked && vfs_node_peek(&new_lr_orig) == NULL) {
+         out_destroy(&new_lr_orig.triplet);
+     }
+
+     vfs_node_put(base);
      fibril_rwlock_write_unlock(&namespace_rwlock);
-     free(path);
-     async_answer_0(rid, rc);
- }
-
- void vfs_unlink(ipc_callid_t rid, ipc_call_t *request)
- {
-     int lflag = IPC_GET_ARG1(*request);
-
-     char *path;
-     int rc = async_data_write_accept((void **) &path, true, 0, 0, 0, NULL);
-     if (rc != EOK) {
-         async_answer_0(rid, rc);
-         return;
-     }
-
-     fibril_rwlock_write_lock(&namespace_rwlock);
-     lflag &= L_DIRECTORY;	/* sanitize lflag */
-     vfs_lookup_res_t lr;
-     rc = vfs_lookup_internal(path, lflag | L_UNLINK, &lr, NULL);
-     free(path);
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         async_answer_0(rid, rc);
-         return;
-     }
-
-     /*
-      * The name has already been unlinked by vfs_lookup_internal().
-      * We have to get and put the VFS node to ensure that it is
-      * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
-      */
-     vfs_node_t *node = vfs_node_get(&lr);
-     fibril_mutex_lock(&nodes_mutex);
-     node->lnkcnt--;
-     fibril_mutex_unlock(&nodes_mutex);
-     fibril_rwlock_write_unlock(&namespace_rwlock);
-     vfs_node_put(node);
-     async_answer_0(rid, EOK);
+     return EOK;
  }

  void vfs_rename(ipc_callid_t rid, ipc_call_t *request)
  {
+     /* The common base directory. */
+     int basefd;
+     char *old = NULL;
+     char *new = NULL;
+     vfs_file_t *base = NULL;
+     int rc;
+
+     basefd = IPC_GET_ARG1(*request);
+
      /* Retrieve the old path. */
-     char *old;
-     int rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
-     if (rc != EOK) {
-         async_answer_0(rid, rc);
-         return;
+     rc = async_data_write_accept((void **) &old, true, 0, 0, 0, NULL);
+     if (rc != EOK) {
+         goto out;
      }

      /* Retrieve the new path. */
-     char *new;
      rc = async_data_write_accept((void **) &new, true, 0, 0, 0, NULL);
      if (rc != EOK) {
-         free(old);
-         async_answer_0(rid, rc);
-         return;
+         goto out;
      }

…

      if ((!oldc) || (!newc)) {
-         async_answer_0(rid, EINVAL);
+         rc = EINVAL;
+         goto out;
+     }
+
+     assert(oldc[olen] == '\0');
+     assert(newc[nlen] == '\0');
+
+     /* Lookup the file structure corresponding to the file descriptor. */
+     vfs_node_t *base_node = root;
+     // TODO: Client-side root.
+     if (basefd != -1) {
+         base = vfs_file_get(basefd);
+         if (!base) {
+             rc = EBADF;
+             goto out;
+         }
+         base_node = base->node;
+     }
+
+     rc = vfs_rename_internal(base_node, oldc, newc);
+
+ out:
+     async_answer_0(rid, rc);
+
+     if (old) {
          free(old);
+     }
+     if (new) {
          free(new);
-         return;
-     }
-
-     oldc[olen] = '\0';
-     newc[nlen] = '\0';
-
-     if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
-         ((newc[str_length(oldc)] == '/') ||
-         (str_length(oldc) == 1) ||
-         (str_length(oldc) == str_length(newc)))) {
-         /*
-          * oldc is a prefix of newc and either
-          * - newc continues with a / where oldc ends, or
-          * - oldc was / itself, or
-          * - oldc and newc are equal.
-          */
-         async_answer_0(rid, EINVAL);
-         free(old);
-         free(new);
-         return;
-     }
-
-     vfs_lookup_res_t old_lr;
-     vfs_lookup_res_t new_lr;
-     vfs_lookup_res_t new_par_lr;
-     fibril_rwlock_write_lock(&namespace_rwlock);
-
-     /* Lookup the node belonging to the old file name. */
-     rc = vfs_lookup_internal(oldc, L_NONE, &old_lr, NULL);
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         async_answer_0(rid, rc);
-         free(old);
-         free(new);
-         return;
-     }
-
-     vfs_node_t *old_node = vfs_node_get(&old_lr);
-     if (!old_node) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         async_answer_0(rid, ENOMEM);
-         free(old);
-         free(new);
-         return;
-     }
-
-     /* Determine the path to the parent of the node with the new name. */
-     char *parentc = str_dup(newc);
-     if (!parentc) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         async_answer_0(rid, rc);
-         free(old);
-         free(new);
-         return;
-     }
-
-     char *lastsl = str_rchr(parentc + 1, '/');
-     if (lastsl)
-         *lastsl = '\0';
-     else
-         parentc[1] = '\0';
-
-     /* Lookup parent of the new file name. */
-     rc = vfs_lookup_internal(parentc, L_NONE, &new_par_lr, NULL);
-     free(parentc);	/* not needed anymore */
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         async_answer_0(rid, rc);
-         free(old);
-         free(new);
-         return;
-     }
-
-     /* Check whether linking to the same file system instance. */
-     if ((old_node->fs_handle != new_par_lr.triplet.fs_handle) ||
-         (old_node->service_id != new_par_lr.triplet.service_id)) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         async_answer_0(rid, EXDEV);	/* different file systems */
-         free(old);
-         free(new);
-         return;
-     }
-
-     /* Destroy the old link for the new name. */
-     vfs_node_t *new_node = NULL;
-     rc = vfs_lookup_internal(newc, L_UNLINK, &new_lr, NULL);
-
-     switch (rc) {
-     case ENOENT:
-         /* simply not in our way */
-         break;
-     case EOK:
-         new_node = vfs_node_get(&new_lr);
-         if (!new_node) {
-             fibril_rwlock_write_unlock(&namespace_rwlock);
-             vfs_node_put(old_node);
-             async_answer_0(rid, ENOMEM);
-             free(old);
-             free(new);
-             return;
-         }
-         fibril_mutex_lock(&nodes_mutex);
-         new_node->lnkcnt--;
-         fibril_mutex_unlock(&nodes_mutex);
-         break;
-     default:
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         async_answer_0(rid, ENOTEMPTY);
-         free(old);
-         free(new);
-         return;
-     }
-
-     /* Create the new link for the new name. */
-     rc = vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index);
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         if (new_node)
-             vfs_node_put(new_node);
-         async_answer_0(rid, rc);
-         free(old);
-         free(new);
-         return;
-     }
-
-     fibril_mutex_lock(&nodes_mutex);
-     old_node->lnkcnt++;
-     fibril_mutex_unlock(&nodes_mutex);
-
-     /* Destroy the link for the old name. */
-     rc = vfs_lookup_internal(oldc, L_UNLINK, NULL, NULL);
-     if (rc != EOK) {
-         fibril_rwlock_write_unlock(&namespace_rwlock);
-         vfs_node_put(old_node);
-         if (new_node)
-             vfs_node_put(new_node);
-         async_answer_0(rid, rc);
-         free(old);
-         free(new);
-         return;
-     }
-
-     fibril_mutex_lock(&nodes_mutex);
-     old_node->lnkcnt--;
-     fibril_mutex_unlock(&nodes_mutex);
-     fibril_rwlock_write_unlock(&namespace_rwlock);
-     vfs_node_put(old_node);
-
-     if (new_node)
-         vfs_node_put(new_node);
-
-     free(old);
-     free(new);
-     async_answer_0(rid, EOK);
+     }
+     if (base) {
+         vfs_file_put(base);
+     }
  }

…
      vfs_lookup_res_t lr;
      fibril_rwlock_read_lock(&namespace_rwlock);
-     rc = vfs_lookup_internal(path, L_NONE, &lr, NULL);
+     rc = vfs_lookup_internal(root, path, L_NONE, &lr);
      free(path);
      if (rc != EOK) {
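For reference, the shared_path() helper added in this changeset returns the index of the last '/' that two paths have in common; vfs_rename_internal() uses it to resolve the common base directory once and then operate on the per-name tails. Below is a small standalone harness illustrating that behaviour; the function body is copied from the new vfs_ops.c above, while the surrounding main() and the example paths are illustrative only.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Copied from the new vfs_ops.c: index of the last '/' shared by both paths. */
static size_t shared_path(char *a, char *b)
{
    size_t res = 0;

    while (a[res] == b[res] && a[res] != 0)
        res++;

    if (a[res] == b[res])
        return res;

    res--;
    while (a[res] != '/')
        res--;
    return res;
}

int main(void)
{
    char old[] = "/dir/sub/old_name";
    char new[] = "/dir/sub/new_name";

    size_t shared = shared_path(old, new);
    assert(shared == 8);                      /* the '/' before the differing names */
    assert(old[shared] == '/' && new[shared] == '/');

    /* vfs_rename_internal() resolves old[0..shared) as the common base directory
     * and then continues with the tails old + shared and new + shared. */
    printf("shared prefix length: %zu\n", shared);
    return 0;
}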