Changes in uspace/srv/vfs/vfs_ops.c [4636a60:b1956e3] in mainline
1 file edited:
uspace/srv/vfs/vfs_ops.c
The changeset compares revision 4636a60 (the removal side of the diff) with revision b1956e3 (the addition side). The FIBRIL_RWLOCK_INITIALIZE(namespace_rwlock) declaration at the top of the file is unchanged; the differences start right below it, with the representation of the mounted root and the mount helpers.

Revision b1956e3 keeps the root as a global vfs_pair_t rootfs = { .fs_handle = 0, .service_id = 0 } and performs the whole mount in vfs_mount_internal(ipc_callid_t rid, service_id_t service_id, fs_handle_t fs_handle, char *mp, char *opts). It resolves the mount point under a write-locked namespace_rwlock: when rootfs is already set, mounting over "/" is refused with EBUSY; otherwise the mount point is looked up with L_MP and pinned with vfs_node_get(), and the comment notes that this reference is dropped only by the corresponding VFS_IN_UNMOUNT, so the mount point cannot be deleted. Lookup or allocation failures unlock, answer the caller (rc or ENOMEM) and return. When the root file system is not mounted yet and mp is "/", the function handles this simple but important case directly by telling the mountee that it is being mounted.

Revision 4636a60 instead keeps the mounted root as a global vfs_node_t *root (initially NULL) and factors the FS-side handshake into vfs_connect_internal(service_id_t service_id, unsigned flags, unsigned instance, char *options, char *fsname, vfs_node_t **root). That helper resolves fsname to a file system handle under fs_list_lock, waiting on fs_list_cv as long as IPC_FLAG_BLOCKING is set, and returns ENOENT when no handle is found; it then tells the mountee that it is being mounted via VFS_OUT_MOUNTED, writes the mount options to it, builds a vfs_lookup_res_t of type VFS_NODE_DIRECTORY from the answer (index from ARG1, size merged from ARG2/ARG3), and stores a referenced root node obtained with vfs_node_get().
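The blocking wait for the file system handle in the removed helper is worth quoting; reconstructed from the removed code (4636a60), it reads roughly:

    /* Excerpt of the removed vfs_connect_internal(). */
    fs_handle_t fs_handle = 0;

    fibril_mutex_lock(&fs_list_lock);
    while (1) {
        fs_handle = fs_name_to_handle(instance, fsname, false);

        /* Stop once the FS is registered, or right away when non-blocking. */
        if (fs_handle != 0 || !(flags & IPC_FLAG_BLOCKING))
            break;

        fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
    }
    fibril_mutex_unlock(&fs_list_lock);

    if (fs_handle == 0)
        return ENOENT;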
The removed vfs_mount_internal(service_id, flags, instance, opts, fs_name, mp) of 4636a60 builds on that helper: without a mounted root it returns ENOENT unless mp is "/", in which case it simply calls vfs_connect_internal(..., &root). With a root already present, mounting over "/" is refused with EBUSY; the mount point is looked up with L_DIRECTORY, fetched with vfs_node_get() (ENOMEM when that fails) and validated: an existing mp_node->mount means EBUSY, a non-directory means ENOTDIR, and a directory with children means ENOTEMPTY. vfs_connect_internal() then produces the mountee node (on failure mp_node is put and ENOMEM returned), which is recorded as mp_node->mount; a comment explains that the two node references are held by the mount so the nodes cannot be freed, and that they are removed in detach_internal().

In the added code path of b1956e3, the root-mount branch grabs an exchange to the mountee, sends VFS_OUT_MOUNTED with the service ID, writes the mount options, waits for the answer, and fills mr_res (index from ARG1, size merged from ARG2/ARG3, link count from ARG4, type VFS_NODE_DIRECTORY); it then records rootfs.fs_handle and rootfs.service_id, takes a reference to the mounted root with vfs_node_get(), and answers the caller. Mounting anything other than "/" without a root file system is refused with ENOENT, since the path cannot be resolved without the root being mounted first. For a regular mount, the code now has all the necessary pieces, the file system handle, the service ID and the mount point node, so it grabs an exchange to the mountee, sends VFS_OUT_MOUNT to the mount point's file system with the mount point's service ID and index plus the mountee's handle and service ID, and hands the mountee's connection over with async_exchange_clone(); on failure the mp_node reference is dropped and the caller answered with the error.
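The connection hand-over in the added vfs_mount_internal() is the notable part; reconstructed from the added code (b1956e3):

    async_exch_t *mountee_exch = vfs_exchange_grab(fs_handle);
    assert(mountee_exch);

    exch = vfs_exchange_grab(mp_res.triplet.fs_handle);
    msg = async_send_4(exch, VFS_OUT_MOUNT,
        (sysarg_t) mp_res.triplet.service_id,
        (sysarg_t) mp_res.triplet.index,
        (sysarg_t) fs_handle,
        (sysarg_t) service_id, &answer);

    /* Send connection */
    rc = async_exchange_clone(exch, mountee_exch);
    vfs_exchange_release(mountee_exch);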
The added code then writes the mount options to the mount point's file system and deliberately waits for the answer before releasing the exchange, to avoid a deadlock in case the answer depends on further calls to the same file system (think of mounting a FS on a file_bd backed by a file on that same FS). On success the root of the mounted file system is materialized from the answer (index, size, link count) and referenced with vfs_node_get(); on failure the mp_node reference is dropped. Either way the caller is answered and namespace_rwlock released.

The IPC entry point is renamed from vfs_mount() to vfs_mount_srv(). Both revisions expect the client library to have done the device-name to device-handle translation, so the service ID arrives as ARG1 of the request, and both then receive the mount point path (up to MAX_PATH_LEN), the mount options and the file system name (up to FS_NAME_MAXLEN) over data-write calls; the error paths differ only in whether free() or async_answer_0() comes first. The new vfs_mount_srv() additionally waits for a VFS_IN_PING call from the client, so that it has a call on which it can report an unknown fs_name, and answers ENOTSUP on both the stray call and the request if anything else arrives. It then checks whether a file system with that name is registered, which also yields the file system handle.
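The added VFS_IN_PING handshake reads roughly as follows (b1956e3):

    /*
     * Wait for VFS_IN_PING so that we can return an error if we don't know
     * fs_name.
     */
    ipc_call_t data;
    ipc_callid_t callid = async_get_call(&data);
    if (IPC_GET_IMETHOD(data) != VFS_IN_PING) {
        async_answer_0(callid, ENOTSUP);
        async_answer_0(rid, ENOTSUP);
        free(mp);
        free(opts);
        free(fs_name);
        return;
    }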
That check resolves fs_name under fs_list_lock with a recheck loop: fs_name_to_handle(instance, fs_name, false) is retried after waiting on fs_list_cv while IPC_FLAG_BLOCKING is set; otherwise both the pending ping and the request are answered with ENOENT. Both revisions then allocate an mtab_ent_t, answering ENOMEM when the allocation fails. The old code takes namespace_rwlock itself around vfs_mount_internal(service_id, flags, instance, opts, fs_name, mp), records the mtab entry only when the mount succeeded, and answers the request with rc at the end. The new code calls vfs_mount_internal(rid, service_id, fs_handle, mp, opts), which answers the request itself; on failure it reports ENOTSUP on the ping and the request and frees everything, otherwise it unconditionally fills in the entry (mount point, fs name, options, instance, service ID), appends it to mtab_list under mtab_list_lock, frees the received strings and finally acknowledges the ping with EOK to signal that fs_name is known.

Unmounting follows the same renaming: vfs_unmount() becomes vfs_unmount_srv(). Both receive the mount point path and take namespace_rwlock for writing. The new code first looks up the root of the mounted file system with L_ROOT and instantiates it with vfs_node_get(), answering the lookup error or ENOMEM when that fails.
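Back in vfs_mount_srv(), the added name-to-handle resolution with its blocking recheck loop reads roughly (b1956e3):

    /*
     * Check if we know a file system with the same name as is in fs_name.
     * This will also give us its file system handle.
     */
    fibril_mutex_lock(&fs_list_lock);
    fs_handle_t fs_handle;
recheck:
    fs_handle = fs_name_to_handle(instance, fs_name, false);
    if (!fs_handle) {
        if (flags & IPC_FLAG_BLOCKING) {
            fibril_condvar_wait(&fs_list_cv, &fs_list_lock);
            goto recheck;
        }

        fibril_mutex_unlock(&fs_list_lock);
        async_answer_0(callid, ENOENT);
        async_answer_0(rid, ENOENT);
        free(mp);
        free(fs_name);
        free(opts);
        return;
    }
    fibril_mutex_unlock(&fs_list_lock);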
It then counts the total number of references for the mounted file system and expects exactly two: the one just obtained and the one taken when the file system was mounted. Anything more means someone is still working with the file system and it cannot be gracefully unmounted, so the request fails with EBUSY.

For unmounting "/" the old code requires the global root to exist (otherwise ENOENT) and its file system's reference-count sum to be exactly one, then sends VFS_OUT_UNMOUNTED to the root file system and, on success, forgets the root node and clears the global pointer. The new code sends VFS_OUT_UNMOUNTED to mr_node's file system and, on success, clears rootfs.fs_handle and rootfs.service_id. For a non-root mount point the new code delegates the operation to the parent file system, which the regular mount point node represents: the mount point is looked up with L_MP, mp_node is fetched, and VFS_OUT_UNMOUNT is sent with the mount point's service ID and index over the parent file system's exchange; every failure path drops the node references, unlocks and answers with the error. The old code likewise looks the mount point up (relative to the old global root) and fails with ENOENT when nothing is mounted there, that is, when mp_node->mount is NULL.
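The graceful-unmount check added in vfs_unmount_srv() is short enough to quote (b1956e3):

    /*
     * Count the total number of references for the mounted file system. We
     * are expecting at least two. One which we got above and one which we
     * got when the file system was mounted. If we find more, it means that
     * the file system cannot be gracefully unmounted at the moment because
     * someone is working with it.
     */
    if (vfs_nodes_refcount_sum_get(mr_node->fs_handle,
        mr_node->service_id) != 2) {
        fibril_rwlock_write_unlock(&namespace_rwlock);
        vfs_node_put(mr_node);
        free(mp);
        async_answer_0(rid, EBUSY);
        return;
    }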
When something is mounted there, the old code additionally requires the mounted file system's reference-count sum to be exactly one, sends VFS_OUT_UNMOUNTED to it, forgets the mountee node, clears mp_node->mount and puts mp_node. The new code drops the reference it obtained above and the reference taken when the file system was mounted (two vfs_node_put() calls on mp_node) and, with everything unmounted successfully, forgets the root node mr_node of the unmounted file system. Both revisions then remove the matching entry from mtab_list under mtab_list_lock, with the new code using the typed list_foreach(mtab_list, link, mtab_ent_t, mtab_ent) form instead of list_get_instance(), free the path and answer EOK.

Opening files is where the two revisions diverge most. Revision 4636a60 exposes vfs_walk() plus vfs_open2(), built on two small helpers: walk_flags_valid() rejects contradictory WALK_* combinations (bits outside WALK_ALL_FLAGS, WALK_MAY_CREATE together with WALK_MUST_CREATE, WALK_REGULAR together with WALK_DIRECTORY, and creation flags without a node type), and walk_lookup_flags() translates the WALK_* flags into L_* lookup flags.
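The removed translation helper is compact; from the removed code (4636a60) it reads roughly:

    /* Removed in b1956e3: map WALK_* request flags to L_* lookup flags. */
    static inline int walk_lookup_flags(int flags)
    {
        int lflags = 0;
        if (flags & WALK_MAY_CREATE || flags & WALK_MUST_CREATE)
            lflags |= L_CREATE;
        if (flags & WALK_MUST_CREATE)
            lflags |= L_EXCLUSIVE;
        if (flags & WALK_REGULAR)
            lflags |= L_FILE;
        if (flags & WALK_DIRECTORY)
            lflags |= L_DIRECTORY;
        return lflags;
    }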
vfs_walk() resolves the received path relative to a parent file descriptor, or the global root when the descriptor is -1 (a TODO notes the missing client-side root), looks it up under a read-locked namespace_rwlock with walk_lookup_flags(flags), gets the node and allocates a descriptor with vfs_fd_alloc(false). Revision b1956e3 replaces this pair with a single vfs_open(): the POSIX interface is open(path, oflag, mode), with oflag and mode arriving in the VFS_IN_OPEN call and the path in a follow-up call, plus a private, non-POSIX lflag used to pass information to vfs_lookup_internal(); mode is ignored for now. The handler insists on exactly one of L_FILE and L_DIRECTORY and rejects L_OPEN, L_ROOT and L_MP (EINVAL otherwise), maps O_CREAT to L_CREATE and O_EXCL to L_EXCLUSIVE, and receives the path. To avoid the race in which the file could be deleted before the VFS node for the looked-up triplet is found/created and locked, namespace_rwlock is taken for writing when L_CREATE is set and for reading otherwise; the lookup runs with lflag | L_OPEN, the path is freed and the node fetched (ENOMEM when that fails). If O_TRUNC was requested and the node has a non-zero size, the file is truncated with vfs_truncate_internal() under the node's contents_rwlock and node->size is reset to zero.
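The added flag mapping and truncation step in vfs_open() look roughly like this (b1956e3):

    if (oflag & O_CREAT)
        lflag |= L_CREATE;
    if (oflag & O_EXCL)
        lflag |= L_EXCLUSIVE;

    /* ... path received, lookup done, node obtained ... */

    /* Truncate the file if requested and if necessary. */
    if (oflag & O_TRUNC) {
        fibril_rwlock_write_lock(&node->contents_rwlock);
        if (node->size) {
            rc = vfs_truncate_internal(node->fs_handle,
                node->service_id, node->index, 0);
            if (rc) {
                fibril_rwlock_write_unlock(&node->contents_rwlock);
                vfs_node_put(node);
                async_answer_0(rid, rc);
                return;
            }
            node->size = 0;
        }
        fibril_rwlock_write_unlock(&node->contents_rwlock);
    }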
The old vfs_walk() finishes by binding the node to the new file structure, inheriting the parent's permissions (or MODE_READ | MODE_WRITE | MODE_APPEND when there is no parent) and leaving open_read and open_write cleared; the separate vfs_open2() later checks the requested MODE_* flags against those permissions (EPERM on violation), rejects opens with neither read nor write as well as write access to directories, sets open_read, open_write and append, and calls vfs_open_node_remote() on the node. The new vfs_open() allocates the descriptor with vfs_fd_alloc((oflag & O_DESC) != 0), stores the node in the file structure, sets file->append for O_APPEND and takes one extra node reference for the fact that the file is open and a file structure points to it; the reference keeps the node alive across vfs_node_put() and is dropped by the respective VFS_IN_CLOSE. The new file descriptor is then returned to the client.

Reading and writing. The old revision handles the requests directly in vfs_rdwr(ipc_callid_t rid, ipc_call_t *request, bool read). The new revision separates the IPC leg behind a callback type, typedef int (* rdwr_ipc_cb_t)(async_exch_t *, vfs_file_t *, ipc_call_t *, bool, void *), with two implementations. rdwr_ipc_client() makes a VFS_OUT_READ/VFS_OUT_WRITE request at the destination FS server and forwards the client's IPC_M_DATA_READ/IPC_M_DATA_WRITE request to it; the call is routed as if sent by VFS itself and, as the comment notes, the call arguments are immutable so nothing needs to be adjusted.
Concretely, rdwr_ipc_client() uses async_data_read_forward_4_1() or async_data_write_forward_4_1() with the node's service ID and index and the 64-bit file position split by LOWER32()/UPPER32(), and reports the number of transferred bytes from ARG1 of the answer. rdwr_ipc_internal() serves VFS-internal I/O on rdwr_io_chunk_t buffers instead: it sends the request with async_send_fast(), pushes the buffer with async_data_read_start() and stores the resulting size back into the chunk. The shared logic becomes vfs_rdwr(int fd, bool read, rdwr_ipc_cb_t ipc_cb, void *ipc_cb_data): it looks up the file structure (ENOENT for an unknown descriptor) and locks it so that subsequent reads and writes see a consistent file position; the old open_read/open_write permission check disappears together with the vfs_open2() model. As before, an exchange to the endpoint file system is grabbed; an appending write first moves the position to the end of the file (file->node->size in the new code, vfs_node_get_size() in the old), and the IPC callback then handles the communication with the endpoint FS. Afterwards both revisions release the exchange, take the byte count from the answer, drop the namespace read lock when the node is a directory, unlock the node and the file, and put the file.
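The VFS-internal variant of the new callback drives the data transfer itself rather than forwarding a client call; reconstructed from the added code (b1956e3):

    static int rdwr_ipc_internal(async_exch_t *exch, vfs_file_t *file,
        ipc_call_t *answer, bool read, void *data)
    {
        rdwr_io_chunk_t *chunk = (rdwr_io_chunk_t *) data;

        if (exch == NULL)
            return ENOENT;

        aid_t msg = async_send_fast(exch, read ? VFS_OUT_READ : VFS_OUT_WRITE,
            file->node->service_id, file->node->index, LOWER32(file->pos),
            UPPER32(file->pos), answer);
        if (msg == 0)
            return EINVAL;

        int retval = async_data_read_start(exch, chunk->buffer, chunk->size);
        if (retval != EOK) {
            async_forget(msg);
            return retval;
        }

        sysarg_t rc;
        async_wait_for(msg, &rc);

        chunk->size = IPC_GET_ARG1(*answer);

        return (int) rc;
    }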
The old handler ends by answering the client with the FS server's reply, which is the final result of the whole operation, together with the byte count. The new vfs_rdwr() just returns rc, and two thin wrappers are added: vfs_rdwr_client() extracts the descriptor from the request, runs vfs_rdwr() with rdwr_ipc_client and answers the client with the result and the byte count, while vfs_rdwr_internal(int fd, bool read, rdwr_io_chunk_t *chunk) runs it with rdwr_ipc_internal; vfs_read() and vfs_write() now call the client wrapper. vfs_seek() is touched only lightly: the expression clamping the new offset to OFF64_MAX is reformatted and SEEK_END reads file->node->size directly instead of calling vfs_node_get_size().

Unlink, stat and mkdir. Revision 4636a60 carries out_destroy(), a helper that grabs an exchange and fires async_msg_2(VFS_OUT_DESTROY, service_id, index) at the owning file system, and vfs_unlink2(), which unlinks a path relative to a parent descriptor. Revision b1956e3 instead adds a path-based vfs_stat(): it receives the path, accepts the client's data-read request, looks the path up with L_NONE under a read-locked namespace_rwlock, gets the node and forwards the read to the file system as VFS_OUT_STAT with async_forward_fast(..., IPC_FF_ROUTE_FROM_ME), answering the request with the file system's reply and putting the node. It also adds vfs_mkdir(), which receives the mode argument and the path.
vfs_mkdir() ignores the mode for now and boils down to a single lookup with L_DIRECTORY | L_CREATE | L_EXCLUSIVE under a write-locked namespace_rwlock, answering with the lookup's result. The new vfs_unlink() receives an lflag and the path, sanitizes lflag down to L_DIRECTORY, and performs the lookup with L_UNLINK; since the name has already been unlinked by vfs_lookup_internal(), it then gets and puts the VFS node while decrementing its lnkcnt under nodes_mutex, which guarantees the node is VFS_OUT_DESTROY'ed once the last reference to it is dropped.

The removed vfs_unlink2() of 4636a60 takes a parent descriptor, an optional expect descriptor and a WALK_DIRECTORY flag: it resolves both descriptors, optionally checks (with __builtin_memcmp()) that the path resolves to the expected triplet, looks the path up with L_UNLINK and, if the node is not held by anyone (vfs_node_peek() returns NULL), destroys it with out_destroy(). The removed rename machinery consists of shared_path(), which returns the length of the common directory prefix of two paths, and vfs_rename_internal(base, old, new), which rejects paths where one is a prefix of the other and starts by resolving the shared portion of the path under a write-locked namespace_rwlock.
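Returning to the added vfs_unlink(), its closing get/put trick is the pattern the comment describes (b1956e3):

    /*
     * The name has already been unlinked by vfs_lookup_internal().
     * We have to get and put the VFS node to ensure that it is
     * VFS_OUT_DESTROY'ed after the last reference to it is dropped.
     */
    vfs_node_t *node = vfs_node_get(&lr);
    fibril_mutex_lock(&nodes_mutex);
    node->lnkcnt--;
    fibril_mutex_unlock(&nodes_mutex);
    fibril_rwlock_write_unlock(&namespace_rwlock);
    vfs_node_put(node);
    async_answer_0(rid, EOK);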
When the two paths share a prefix, the removed vfs_rename_internal() temporarily terminates the old path at the split point, looks the prefix up as a directory to obtain a new base node, and advances both paths past it; otherwise it just adds a reference to the caller's base. It then unlinks any existing target with L_UNLINK | L_DISABLE_MOUNTS (remembering it in new_lr_orig), unlinks the source the same way, and links the source triplet under the new name with vfs_link_internal(); every failure path re-creates the links it has already removed. If the original target was unlinked and is not held by anyone, it is destroyed with out_destroy(). The IPC handlers of both revisions receive and canonify the old and new paths, answering EINVAL when canonification fails; the removed vfs_rename() additionally reads a base descriptor from the request, asserts that the canonical strings are NUL-terminated and resolves the descriptor to a base node (the global root when it is -1, again with a TODO for a client-side root).
The removed handler then simply runs vfs_rename_internal(base_node, oldc, newc), answers with its result and releases the paths and the base file. The added vfs_rename() of b1956e3 NUL-terminates both canonical paths itself and rejects the rename with EINVAL when oldc is a prefix of newc and either newc continues with a '/' where oldc ends, oldc was "/" itself, or the two paths are equal. Under a write-locked namespace_rwlock it looks the old path up with L_NONE and gets old_node, duplicates the new path and truncates it at the last '/' to obtain the parent directory of the new name, looks that parent up, and refuses the operation with EXDEV when the parent lives on a different file system handle or service ID than old_node (different file systems).
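The prefix guard near the top of the added vfs_rename() maps directly onto its comment (b1956e3):

    if ((!str_lcmp(newc, oldc, str_length(oldc))) &&
        ((newc[str_length(oldc)] == '/') ||
        (str_length(oldc) == 1) ||
        (str_length(oldc) == str_length(newc)))) {
        /*
         * oldc is a prefix of newc and either
         * - newc continues with a / where oldc ends, or
         * - oldc was / itself, or
         * - oldc and newc are equal.
         */
        async_answer_0(rid, EINVAL);
        free(old);
        free(new);
        return;
    }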
The added vfs_rename() then destroys any old link for the new name: a lookup with L_UNLINK may return ENOENT (the name is simply not in the way), EOK (the displaced node is fetched and its lnkcnt decremented under nodes_mutex), or anything else, which is reported as ENOTEMPTY. It creates the new link with vfs_lookup_internal(newc, L_LINK, NULL, NULL, old_node->index) and increments old_node's lnkcnt, destroys the link for the old name with another L_UNLINK lookup and decrements the lnkcnt again, and finally unlocks, puts the nodes, frees the paths and answers EOK; each failure path along the way unwinds the node references and reports the error.

In the mount-table handler further down, the list_foreach over mtab_list is converted to the typed list_foreach(mtab_list, link, mtab_ent_t, mtab_ent) form here as well. Finally, revision b1956e3 adds vfs_statfs(): like vfs_stat() it receives a path, accepts the client's data-read request, looks the path up with L_NONE under a read-locked namespace_rwlock, gets the node and forwards the request to the file system as VFS_OUT_STATFS with async_forward_fast(..., IPC_FF_ROUTE_FROM_ME), answering with the file system's reply.
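The forwarding pattern shared by the added vfs_stat() and vfs_statfs() looks roughly like this, shown here for VFS_OUT_STATFS (b1956e3):

    async_exch_t *exch = vfs_exchange_grab(node->fs_handle);

    aid_t msg;
    msg = async_send_3(exch, VFS_OUT_STATFS, node->service_id,
        node->index, false, NULL);
    async_forward_fast(callid, exch, 0, 0, 0, IPC_FF_ROUTE_FROM_ME);

    vfs_exchange_release(exch);

    sysarg_t rv;
    async_wait_for(msg, &rv);

    async_answer_0(rid, rv);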
vfs_statfs() finishes by dropping its node reference with vfs_node_put(). The closing Doxygen group comment at the end of the file is unchanged.