Changes in kernel/generic/src/ddi/ddi.c [98000fb:d7533c7] in mainline
File: 1 edited
  kernel/generic/src/ddi/ddi.c
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
kernel/generic/src/ddi/ddi.c (r98000fb → rd7533c7)

  #include <mm/frame.h>
  #include <mm/as.h>
- #include <synch/spinlock.h>
+ #include <synch/mutex.h>
  #include <syscall/copy.h>
  #include <adt/btree.h>
…
  #include <align.h>
  #include <errno.h>
+ #include <trace.h>
  
  /** This lock protects the parea_btree. */
- SPINLOCK_INITIALIZE(parea_lock);
+ static mutex_t parea_lock;
  
  /** B+tree with enabled physical memory areas. */
  static btree_t parea_btree;
  
- /** Initialize DDI. */
+ /** Initialize DDI.
+  *
+  */
  void ddi_init(void)
  {
      btree_create(&parea_btree);
+     mutex_initialize(&parea_lock, MUTEX_PASSIVE);
  }
…
  void ddi_parea_register(parea_t *parea)
  {
-     ipl_t ipl = interrupts_disable();
-     spinlock_lock(&parea_lock);
+     mutex_lock(&parea_lock);
  
      /*
…
      btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
  
-     spinlock_unlock(&parea_lock);
-     interrupts_restore(ipl);
+     mutex_unlock(&parea_lock);
  }
…
   *
   */
- static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags)
+ NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages,
+     unsigned int flags)
  {
      ASSERT(TASK);
-     ASSERT((pf % FRAME_SIZE) == 0);
-     ASSERT((vp % PAGE_SIZE) == 0);
-
-     /*
-      * Make sure the caller is authorised to make this syscall.
-      */
-     cap_t caps = cap_get(TASK);
-     if (!(caps & CAP_MEM_MANAGER))
-         return EPERM;
+
+     if ((pf % FRAME_SIZE) != 0)
+         return EBADMEM;
+
+     if ((vp % PAGE_SIZE) != 0)
+         return EBADMEM;
+
+     /*
+      * Unprivileged tasks are only allowed to map pareas
+      * which are explicitly marked as such.
+      */
+     bool priv =
+         ((cap_get(TASK) & CAP_MEM_MANAGER) == CAP_MEM_MANAGER);
  
      mem_backend_data_t backend_data;
…
      backend_data.frames = pages;
  
-     ipl_t ipl = interrupts_disable();
-
      /* Find the zone of the physical memory */
-     spinlock_lock(&zones.lock);
+     irq_spinlock_lock(&zones.lock, true);
      size_t znum = find_zone(ADDR2PFN(pf), pages, 0);
  
      if (znum == (size_t) -1) {
-         /* Frames not found in any zones
-          * -> assume it is hardware device and allow mapping
-          */
-         spinlock_unlock(&zones.lock);
+         /*
+          * Frames not found in any zone
+          * -> assume it is a hardware device and allow mapping
+          * for privileged tasks.
+          */
+         irq_spinlock_unlock(&zones.lock, true);
+
+         if (!priv)
+             return EPERM;
+
          goto map;
      }
  
      if (zones.info[znum].flags & ZONE_FIRMWARE) {
-         /* Frames are part of firmware */
-         spinlock_unlock(&zones.lock);
+         /*
+          * Frames are part of firmware
+          * -> allow mapping for privileged tasks.
+          */
+         irq_spinlock_unlock(&zones.lock, true);
+
+         if (!priv)
+             return EPERM;
+
          goto map;
      }
  
      if (zone_flags_available(zones.info[znum].flags)) {
-         /* Frames are part of physical memory, check if the memory
-          * region is enabled for mapping.
-          */
-         spinlock_unlock(&zones.lock);
-
-         spinlock_lock(&parea_lock);
+         /*
+          * Frames are part of physical memory, check
+          * if the memory region is enabled for mapping.
+          */
+         irq_spinlock_unlock(&zones.lock, true);
+
+         mutex_lock(&parea_lock);
          btree_node_t *nodep;
          parea_t *parea = (parea_t *) btree_search(&parea_btree,
              (btree_key_t) pf, &nodep);
  
-         if ((!parea) || (parea->frames < pages))
-             goto err;
-
-         spinlock_unlock(&parea_lock);
+         if ((!parea) || (parea->frames < pages)) {
+             mutex_unlock(&parea_lock);
+             return ENOENT;
+         }
+
+         if (!priv) {
+             if (!parea->unpriv) {
+                 mutex_unlock(&parea_lock);
+                 return EPERM;
+             }
+         }
+
+         mutex_unlock(&parea_lock);
          goto map;
      }
  
- err:
-     spinlock_unlock(&zones.lock);
-     interrupts_restore(ipl);
+     irq_spinlock_unlock(&zones.lock, true);
      return ENOENT;
  
  map:
-     spinlock_lock(&TASK->lock);
-
      if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
          AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
…
           * We report it using ENOMEM.
           */
-         spinlock_unlock(&TASK->lock);
-         interrupts_restore(ipl);
          return ENOMEM;
      }
…
       * Mapping is created on-demand during page fault.
       */
-
-     spinlock_unlock(&TASK->lock);
-     interrupts_restore(ipl);
      return 0;
  }
…
   *
   */
- static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
+ NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr,
+     size_t size)
  {
      /*
…
          return EPERM;
  
-     ipl_t ipl = interrupts_disable();
-     spinlock_lock(&tasks_lock);
+     irq_spinlock_lock(&tasks_lock, true);
  
      task_t *task = task_find_by_id(id);
…
           * context.
           */
-         spinlock_unlock(&tasks_lock);
-         interrupts_restore(ipl);
+         irq_spinlock_unlock(&tasks_lock, true);
          return ENOENT;
      }
  
      /* Lock the task and release the lock protecting tasks_btree. */
-     spinlock_lock(&task->lock);
-     spinlock_unlock(&tasks_lock);
+     irq_spinlock_exchange(&tasks_lock, &task->lock);
  
      int rc = ddi_iospace_enable_arch(task, ioaddr, size);
  
-     spinlock_unlock(&task->lock);
-     interrupts_restore(ipl);
+     irq_spinlock_unlock(&task->lock, true);
  
      return rc;
…
   *
   */
- unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
-     unative_t pages, unative_t flags)
- {
-     return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
+ sysarg_t sys_physmem_map(sysarg_t phys_base, sysarg_t virt_base,
+     sysarg_t pages, sysarg_t flags)
+ {
+     return (sysarg_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
      FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
      (size_t) pages, (int) flags);
…
   *
   */
- unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
+ sysarg_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg)
  {
      ddi_ioarg_t arg;
      int rc = copy_from_uspace(&arg, uspace_io_arg, sizeof(ddi_ioarg_t));
      if (rc != 0)
-         return (unative_t) rc;
-
-     return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
+         return (sysarg_t) rc;
+
+     return (sysarg_t) ddi_iospace_enable((task_id_t) arg.task_id,
          (uintptr_t) arg.ioaddr, (size_t) arg.size);
  }
  
- /** Disable or enable preemption.
-  *
-  * @param enable If non-zero, the preemption counter will be decremented,
-  *               leading to potential enabling of preemption. Otherwise
-  *               the preemption counter will be incremented, preventing
-  *               preemption from occurring.
-  *
-  * @return Zero on success or EPERM if callers capabilities are not sufficient.
-  *
-  */
- unative_t sys_preempt_control(int enable)
- {
-     if (!cap_get(TASK) & CAP_PREEMPT_CONTROL)
-         return EPERM;
-
-     if (enable)
-         preemption_enable();
-     else
-         preemption_disable();
-
-     return 0;
- }
-
  /** @}
   */
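Two aspects of this changeset deserve a note. First, ddi_physmem_map() no longer requires CAP_MEM_MANAGER unconditionally: a physical area whose parea_t is registered with the new unpriv flag set may now be mapped by any task. A minimal sketch of how a kernel driver might expose such an area, assuming a hypothetical frame buffer; the fields pbase, frames and unpriv come from the diff, while fb_expose(), the address and the size are made up for illustration:

#include <ddi/ddi.h>

/* Hypothetical example: expose a device frame buffer so that tasks
 * without CAP_MEM_MANAGER can map it via sys_physmem_map(). */
static parea_t fb_parea;

void fb_expose(void)
{
    fb_parea.pbase = 0xfd000000;    /* physical base, frame-aligned (illustrative) */
    fb_parea.frames = 1024;         /* size of the area in physical frames */
    fb_parea.unpriv = true;         /* mappable without CAP_MEM_MANAGER */

    ddi_parea_register(&fb_parea);  /* inserts the area into parea_btree */
}

Areas registered without unpriv, firmware zones and unknown (device) frames keep the old behaviour: mapping them still returns EPERM unless the caller holds CAP_MEM_MANAGER.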
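Second, the locking rewrite follows one pattern throughout: each manual interrupts_disable()/spinlock pair becomes either a passive mutex (parea_lock, which is only taken from thread context) or an irq_spinlock, whose boolean argument folds the interrupt-level handling into the lock itself. A sketch of the pattern as used in ddi_iospace_enable(); the comments describe the intended semantics as visible in the diff, not the kernel's internal implementation:

/* Before: interrupt state handled by hand at every call site. */
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
/* ... */
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);

/* After: passing 'true' makes the lock operation also disable
 * interrupts, and the matching unlock restore the previous level. */
irq_spinlock_lock(&tasks_lock, true);
task_t *task = task_find_by_id(id);
/* ... */

/* Hand-over: release tasks_lock and acquire task->lock while
 * interrupts stay disabled, so there is no unlocked window in
 * which the task could disappear. */
irq_spinlock_exchange(&tasks_lock, &task->lock);
/* ... */
irq_spinlock_unlock(&task->lock, true);  /* also restores interrupts */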