Changes in kernel/generic/src/ddi/ddi.c [fbcdeb8:c6ae4c2] in mainline
File: 1 edited
Legend:
- Lines with no prefix are unmodified context.
- Lines prefixed with "+" were added (present in c6ae4c2).
- Lines prefixed with "-" were removed (present only in fbcdeb8).
- "[…]" marks runs of unchanged lines omitted from the listing.
kernel/generic/src/ddi/ddi.c
[…]
  *
  * @param phys  Physical address of the starting frame.
+ * @param virt  Virtual address of the starting page.
  * @param pages Number of pages to map.
  * @param flags Address space area flags for the mapping.
- * @param virt  Virtual address of the starting page.
- * @param bound Lowest virtual address bound.
  *
  * @return EOK on success.
  * @return EPERM if the caller lacks capabilities to use this syscall.
- * @return EBADMEM if phys is not page aligned.
+ * @return EBADMEM if phys or virt is not page aligned.
  * @return ENOENT if there is no task matching the specified ID or
  *         the physical address space is not enabled for mapping.
[…]
  *
  */
-NO_TRACE static int physmem_map(uintptr_t phys, size_t pages,
-    unsigned int flags, uintptr_t *virt, uintptr_t bound)
+NO_TRACE static int ddi_physmem_map(uintptr_t phys, uintptr_t virt, size_t pages,
+    unsigned int flags)
 {
     ASSERT(TASK);
 
     if ((phys % FRAME_SIZE) != 0)
         return EBADMEM;
 
+    if ((virt % PAGE_SIZE) != 0)
+        return EBADMEM;
+
[…]
 
 map:
-    if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages),
-        AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
+    if (!as_area_create(TASK->as, flags, FRAMES2SIZE(pages), virt,
+        AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
         /*
          * The address space area was not created.
[…]
 }
 
-NO_TRACE static int physmem_unmap(uintptr_t virt)
-{
-    // TODO: implement unmap
-    return EOK;
-}
-
-/** Wrapper for SYS_PHYSMEM_MAP syscall.
- *
- * @param phys     Physical base address to map
- * @param pages    Number of pages
- * @param flags    Flags of newly mapped pages
- * @param virt_ptr Destination virtual address
- * @param bound    Lowest virtual address bound.
- *
- * @return 0 on success, otherwise it returns error code found in errno.h
- *
- */
-sysarg_t sys_physmem_map(uintptr_t phys, size_t pages, unsigned int flags,
-    void *virt_ptr, uintptr_t bound)
-{
-    uintptr_t virt = (uintptr_t) -1;
-    int rc = physmem_map(ALIGN_DOWN(phys, FRAME_SIZE), pages, flags,
-        &virt, bound);
-    if (rc != EOK)
-        return rc;
-
-    rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
-    if (rc != EOK) {
-        physmem_unmap((uintptr_t) virt);
-        return rc;
-    }
-
-    return EOK;
-}
-
-sysarg_t sys_physmem_unmap(uintptr_t virt)
-{
-    return physmem_unmap(virt);
-}
-
 /** Enable range of I/O space for task.
  *
[…]
  *
  */
-NO_TRACE static int iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
+NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
+    size_t size)
 {
     /*
[…]
     /* Lock the task and release the lock protecting tasks_btree. */
     irq_spinlock_exchange(&tasks_lock, &task->lock);
+
     int rc = ddi_iospace_enable_arch(task, ioaddr, size);
+
     irq_spinlock_unlock(&task->lock, true);
 
     return rc;
+}
+
+/** Wrapper for SYS_PHYSMEM_MAP syscall.
+ *
+ * @param phys  Physical base address to map
+ * @param virt  Destination virtual address
+ * @param pages Number of pages
+ * @param flags Flags of newly mapped pages
+ *
+ * @return 0 on success, otherwise it returns error code found in errno.h
+ *
+ */
+sysarg_t sys_physmem_map(uintptr_t phys, uintptr_t virt,
+    size_t pages, unsigned int flags)
+{
+    return (sysarg_t)
+        ddi_physmem_map(ALIGN_DOWN(phys, FRAME_SIZE),
+        ALIGN_DOWN(virt, PAGE_SIZE), pages, flags);
 }
 
[…]
         return (sysarg_t) rc;
 
-    return (sysarg_t) iospace_enable((task_id_t) arg.task_id,
+    return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
         (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
-sysarg_t sys_iospace_disable(ddi_ioarg_t *uspace_io_arg)
-{
-    // TODO: implement
-    return ENOTSUP;
-}
-
-NO_TRACE static int dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys)
+NO_TRACE static int dmamem_map(uintptr_t virt, size_t size,
+    unsigned int map_flags, unsigned int flags, void **phys)
 {
     ASSERT(TASK);
 
-    // TODO: implement locking of non-anonymous mapping
-    return page_find_mapping(virt, phys);
-}
-
-NO_TRACE static int dmamem_map_anonymous(size_t size, unsigned int map_flags,
-    unsigned int flags, void **phys, uintptr_t *virt, uintptr_t bound)
-{
-    ASSERT(TASK);
-
-    size_t pages = SIZE2FRAMES(size);
-    uint8_t order;
-
-    /* We need the 2^order >= pages */
-    if (pages == 1)
-        order = 0;
-    else
-        order = fnzb(pages - 1) + 1;
-
-    *phys = frame_alloc_noreserve(order, 0);
-    if (*phys == NULL)
-        return ENOMEM;
-
-    mem_backend_data_t backend_data;
-    backend_data.base = (uintptr_t) *phys;
-    backend_data.frames = pages;
-
-    if (!as_area_create(TASK->as, map_flags, size,
-        AS_AREA_ATTR_NONE, &phys_backend, &backend_data, virt, bound)) {
-        frame_free_noreserve((uintptr_t) *phys);
-        return ENOMEM;
-    }
-
-    return EOK;
-}
-
-NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size)
+    if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
+        // TODO: implement locking of non-anonymous mapping
+        return page_find_mapping(virt, phys);
+    } else {
+        // TODO: implement locking
+
+        if ((virt % PAGE_SIZE) != 0)
+            return EBADMEM;
+
+        size_t pages = SIZE2FRAMES(size);
+        uint8_t order;
+
+        /* We need the 2^order >= pages */
+        if (pages == 1)
+            order = 0;
+        else
+            order = fnzb(pages - 1) + 1;
+
+        *phys = frame_alloc_noreserve(order, 0);
+        if (*phys == NULL)
+            return ENOMEM;
+
+        mem_backend_data_t backend_data;
+        backend_data.base = (uintptr_t) *phys;
+        backend_data.frames = pages;
+
+        if (!as_area_create(TASK->as, map_flags, size, virt,
+            AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
+            frame_free_noreserve((uintptr_t) *phys);
+            return ENOMEM;
+        }
+
+        return EOK;
+    }
+}
+
+NO_TRACE static int dmamem_unmap(uintptr_t virt, size_t size,
+    unsigned int flags)
 {
     // TODO: implement unlocking & unmap
[…]
 }
 
-NO_TRACE static int dmamem_unmap_anonymous(uintptr_t virt)
-{
-    // TODO: implement unlocking & unmap
+sysarg_t sys_dmamem_map(uintptr_t virt, size_t size, unsigned int map_flags,
+    unsigned int flags, void *phys_ptr)
+{
+    void *phys;
+    int rc = dmamem_map(virt, size, map_flags, flags, &phys);
+    if (rc != EOK)
+        return rc;
+
+    rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
+    if (rc != EOK) {
+        dmamem_unmap(virt, size, flags);
+        return rc;
+    }
+
     return EOK;
 }
 
-sysarg_t sys_dmamem_map(size_t size, unsigned int map_flags, unsigned int flags,
-    void *phys_ptr, void *virt_ptr, uintptr_t bound)
-{
-    if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0) {
-        /*
-         * Non-anonymous DMA mapping
-         */
-
-        void *phys;
-        int rc = dmamem_map((uintptr_t) virt_ptr, size, map_flags,
-            flags, &phys);
-
-        if (rc != EOK)
-            return rc;
-
-        rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
-        if (rc != EOK) {
-            dmamem_unmap((uintptr_t) virt_ptr, size);
-            return rc;
-        }
-    } else {
-        /*
-         * Anonymous DMA mapping
-         */
-
-        void *phys;
-        uintptr_t virt = (uintptr_t) -1;
-        int rc = dmamem_map_anonymous(size, map_flags, flags,
-            &phys, &virt, bound);
-        if (rc != EOK)
-            return rc;
-
-        rc = copy_to_uspace(phys_ptr, &phys, sizeof(phys));
-        if (rc != EOK) {
-            dmamem_unmap_anonymous((uintptr_t) virt);
-            return rc;
-        }
-
-        rc = copy_to_uspace(virt_ptr, &virt, sizeof(virt));
-        if (rc != EOK) {
-            dmamem_unmap_anonymous((uintptr_t) virt);
-            return rc;
-        }
-    }
-
-    return EOK;
-}
-
 sysarg_t sys_dmamem_unmap(uintptr_t virt, size_t size, unsigned int flags)
 {
-    if ((flags & DMAMEM_FLAGS_ANONYMOUS) == 0)
-        return dmamem_unmap(virt, size);
-    else
-        return dmamem_unmap_anonymous(virt);
+    return dmamem_unmap(virt, size, flags);
 }
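A side note on the alignment handling above: ddi_physmem_map rejects a phys or virt argument that is not frame/page aligned, while the sys_physmem_map wrapper first rounds both addresses down with ALIGN_DOWN. The snippet below is a minimal standalone sketch of that rounding, assuming the common power-of-two mask definition of ALIGN_DOWN and an illustrative 4 KiB frame size; it is not taken from the changeset.

#include <stdint.h>
#include <stdio.h>

/* Assumed power-of-two alignment helper; the kernel provides its own macro. */
#define ALIGN_DOWN(x, a)  ((x) & ~((uintptr_t)(a) - 1))

#define FRAME_SIZE 4096   /* illustrative value; architecture dependent */

int main(void)
{
    uintptr_t phys = 0x12345;                       /* unaligned input */
    uintptr_t base = ALIGN_DOWN(phys, FRAME_SIZE);  /* rounds to 0x12000 */

    /* After rounding, the alignment check in ddi_physmem_map passes. */
    printf("phys=%#lx -> base=%#lx (base %% FRAME_SIZE = %lu)\n",
        (unsigned long) phys, (unsigned long) base,
        (unsigned long) (base % FRAME_SIZE));
    return 0;
}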
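The anonymous branch of dmamem_map sizes its physical allocation as the smallest order with 2^order >= pages, using fnzb() (the index of the most significant set bit). Below is a minimal self-contained sketch of that calculation with a stand-in fnzb_u64() of that semantics; the kernel's own fnzb() and frame allocator are not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fnzb(): index of the most significant set bit. */
static uint8_t fnzb_u64(uint64_t arg)
{
    uint8_t n = 0;
    while (arg >>= 1)
        n++;
    return n;
}

/* Smallest order with 2^order >= pages, as computed in dmamem_map above. */
static uint8_t alloc_order(uint64_t pages)
{
    if (pages == 1)
        return 0;
    return fnzb_u64(pages - 1) + 1;
}

int main(void)
{
    /* e.g. 1 page -> order 0, 5 pages -> order 3 (8 frames reserved) */
    for (uint64_t pages = 1; pages <= 9; pages++)
        printf("%llu pages -> order %u (%llu frames)\n",
            (unsigned long long) pages, (unsigned) alloc_order(pages),
            (unsigned long long) (1ULL << alloc_order(pages)));
    return 0;
}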