Changes in kernel/generic/src/ddi/ddi.c [d7533c7:98000fb] in mainline
File: kernel/generic/src/ddi/ddi.c (1 edited)
Legend:
- Unmodified (context lines, no prefix)
- Added (+)
- Removed (-)
kernel/generic/src/ddi/ddi.c
--- kernel/generic/src/ddi/ddi.c (rd7533c7)
+++ kernel/generic/src/ddi/ddi.c (r98000fb)
@@ -46,5 +46,5 @@
 #include <mm/frame.h>
 #include <mm/as.h>
-#include <synch/mutex.h>
+#include <synch/spinlock.h>
 #include <syscall/copy.h>
 #include <adt/btree.h>
@@ -52,19 +52,15 @@
 #include <align.h>
 #include <errno.h>
-#include <trace.h>
 
 /** This lock protects the parea_btree. */
-static mutex_t parea_lock;
+SPINLOCK_INITIALIZE(parea_lock);
 
 /** B+tree with enabled physical memory areas. */
 static btree_t parea_btree;
 
-/** Initialize DDI.
- *
- */
+/** Initialize DDI. */
 void ddi_init(void)
 {
     btree_create(&parea_btree);
-    mutex_initialize(&parea_lock, MUTEX_PASSIVE);
 }
 
@@ -76,5 +72,6 @@
 void ddi_parea_register(parea_t *parea)
 {
-    mutex_lock(&parea_lock);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&parea_lock);
 
     /*
@@ -83,5 +80,6 @@
     btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
 
-    mutex_unlock(&parea_lock);
+    spinlock_unlock(&parea_lock);
+    interrupts_restore(ipl);
 }
 
@@ -100,21 +98,16 @@
  *
  */
-NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages,
-    unsigned int flags)
+static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags)
 {
     ASSERT(TASK);
-
-    if ((pf % FRAME_SIZE) != 0)
-        return EBADMEM;
-
-    if ((vp % PAGE_SIZE) != 0)
-        return EBADMEM;
-
-    /*
-     * Unprivileged tasks are only allowed to map pareas
-     * which are explicitly marked as such.
-     */
-    bool priv =
-        ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);
+    ASSERT((pf % FRAME_SIZE) == 0);
+    ASSERT((vp % PAGE_SIZE) == 0);
+
+    /*
+     * Make sure the caller is authorised to make this syscall.
+     */
+    cap_t caps = cap_get(TASK);
+    if (!(caps & CAP_MEM_MANAGER))
+        return EPERM;
 
     mem_backend_data_t backend_data;
@@ -122,67 +115,50 @@
     backend_data.frames = pages;
 
+    ipl_t ipl = interrupts_disable();
+
     /* Find the zone of the physical memory */
-    irq_spinlock_lock(&zones.lock, true);
+    spinlock_lock(&zones.lock);
     size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
 
     if (znum == (size_t) -1) {
-        /*
-         * Frames not found in any zone
-         * -> assume it is a hardware device and allow mapping
-         * for privileged tasks.
-         */
-        irq_spinlock_unlock(&zones.lock, true);
+        /* Frames not found in any zones
+         * -> assume it is hardware device and allow mapping
+         */
+        spinlock_unlock(&zones.lock);
+        goto map;
+    }
+
+    if (zones.info[znum].flags & ZONE_FIRMWARE) {
+        /* Frames are part of firmware */
+        spinlock_unlock(&zones.lock);
+        goto map;
+    }
+
+    if (zone_flags_available(zones.info[znum].flags)) {
+        /* Frames are part of physical memory, check if the memory
+         * region is enabled for mapping.
+         */
+        spinlock_unlock(&zones.lock);
 
-        if (!priv)
-            return EPERM;
-
-        goto map;
-    }
-
-    if (zones.info[znum].flags & ZONE_FIRMWARE) {
-        /*
-         * Frames are part of firmware
-         * -> allow mapping for privileged tasks.
-         */
-        irq_spinlock_unlock(&zones.lock, true);
-
-        if (!priv)
-            return EPERM;
-
-        goto map;
-    }
-
-    if (zone_flags_available(zones.info[znum].flags)) {
-        /*
-         * Frames are part of physical memory, check
-         * if the memory region is enabled for mapping.
-         */
-        irq_spinlock_unlock(&zones.lock, true);
-
-        mutex_lock(&parea_lock);
+        spinlock_lock(&parea_lock);
         btree_node_t *nodep;
         parea_t *parea = (parea_t *) btree_search(&parea_btree,
             (btree_key_t) pf, &nodep);
 
-        if ((!parea) || (parea->frames < pages)) {
-            mutex_unlock(&parea_lock);
-            return ENOENT;
-        }
+        if ((!parea) || (parea->frames < pages))
+            goto err;
 
-        if (!priv) {
-            if (!parea->unpriv) {
-                mutex_unlock(&parea_lock);
-                return EPERM;
-            }
-        }
-
-        mutex_unlock(&parea_lock);
+        spinlock_unlock(&parea_lock);
         goto map;
     }
 
-    irq_spinlock_unlock(&zones.lock, true);
+err:
+    spinlock_unlock(&zones.lock);
+    interrupts_restore(ipl);
     return ENOENT;
 
 map:
+    spinlock_lock(&TASK->lock);
+
     if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
         AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
@@ -191,4 +167,6 @@
         * We report it using ENOMEM.
         */
+        spinlock_unlock(&TASK->lock);
+        interrupts_restore(ipl);
         return ENOMEM;
     }
@@ -197,4 +175,7 @@
     * Mapping is created on-demand during page fault.
     */
+
+    spinlock_unlock(&TASK->lock);
+    interrupts_restore(ipl);
     return 0;
 }
@@ -210,6 +191,5 @@
  *
  */
-NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
-    size_t size)
+static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
 {
     /*
@@ -220,5 +200,6 @@
         return EPERM;
 
-    irq_spinlock_lock(&tasks_lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
 
     task_t *task = task_find_by_id(id);
@@ -230,14 +211,17 @@
         * context.
         */
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return ENOENT;
     }
 
     /* Lock the task and release the lock protecting tasks_btree. */
-    irq_spinlock_exchange(&tasks_lock, &task->lock);
+    spinlock_lock(&task->lock);
+    spinlock_unlock(&tasks_lock);
 
     int rc = ddi_iospace_enable_arch(task, ioaddr, size);
 
-    irq_spinlock_unlock(&task->lock, true);
+    spinlock_unlock(&task->lock);
+    interrupts_restore(ipl);
 
     return rc;
@@ -254,8 +238,8 @@
  *
  */
-sysarg_t sys_physmem_map(sysarg_t phys_base, sysarg_t virt_base,
-    sysarg_t pages, sysarg_t flags)
-{
-    return (sysarg_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
+unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
+    unative_t pages, unative_t flags)
+{
+    return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
         FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
         (size_t) pages, (int) flags);
@@ -269,15 +253,38 @@
  *
  */
-sysarg_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
+unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
 {
     ddi_ioarg_t arg;
     int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
     if (rc != 0)
-        return (sysarg_t) rc;
+        return (unative_t) rc;
 
-    return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
+    return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
         (uintptr_t) arg.ioaddr, (size_t) arg.size);
 }
 
+/** Disable or enable preemption.
+ *
+ * @param enable If non-zero, the preemption counter will be decremented,
+ *               leading to potential enabling of preemption. Otherwise
+ *               the preemption counter will be incremented, preventing
+ *               preemption from occurring.
+ *
+ * @return Zero on success or EPERM if callers capabilities are not sufficient.
+ *
+ */
+unative_t sys_preempt_control(int enable)
+{
+    if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
+        return EPERM;
+
+    if (enable)
+        preemption_enable();
+    else
+        preemption_disable();
+
+    return 0;
+}
+
 /** @}
 */
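Note: the common theme of this changeset is replacing the passive parea_lock mutex with a spinlock held under disabled interrupts, which obliges every exit path to restore the saved interrupt priority level. A minimal sketch of the pattern, using the same HelenOS primitives that appear in the hunks above (example_lock and example_critical_section are hypothetical names; the interrupt primitives come from the kernel's arch layer):

#include <synch/spinlock.h>

SPINLOCK_INITIALIZE(example_lock);

static void example_critical_section(void)
{
    /* Disable interrupts before taking the lock: if an interrupt
     * handler on this CPU tried to grab the same spinlock while we
     * hold it, the CPU would deadlock spinning on itself. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&example_lock);

    /* ... short, non-blocking critical section ... */

    spinlock_unlock(&example_lock);
    /* Restore (rather than blindly enable) the previous interrupt
     * level, and do so on every return path, as the err:/map: paths
     * in ddi_physmem_map() above are careful to do. */
    interrupts_restore(ipl);
}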
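Note: in ddi_iospace_enable() the r98000fb side replaces the single irq_spinlock_exchange() call with an explicit hand-over-hand sequence: the task's own lock is taken before the registry lock is dropped, so the task cannot be destroyed between the lookup and its use. Condensed from the hunk above:

    spinlock_lock(&tasks_lock);           /* registry stable from here on */
    task_t *task = task_find_by_id(id);   /* pointer valid only under tasks_lock */
    ...
    spinlock_lock(&task->lock);           /* pin the task first ... */
    spinlock_unlock(&tasks_lock);         /* ... then release the registry */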
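Note: the capability test added in sys_preempt_control() deserves a second look. In C, ! binds more tightly than &, so

    if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)

parses as (!cap_get(TASK)) & CAP_PREEMPT_CONTROL: the left operand is 0 or 1, so the condition can only be true if CAP_PREEMPT_CONTROL happens to occupy bit 0; for any higher-order capability bit the check never fires and EPERM is never returned. The conventional form, matching the CAP_MEM_MANAGER test in ddi_physmem_map() above, would be:

    if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))
        return EPERM;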