Changes in kernel/generic/src/ddi/ddi.c [953bc1ef:2fa10f6] in mainline
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/ddi/ddi.c
r953bc1ef r2fa10f6 46 46 #include <mm/frame.h> 47 47 #include <mm/as.h> 48 #include <synch/ spinlock.h>48 #include <synch/mutex.h> 49 49 #include <syscall/copy.h> 50 50 #include <adt/btree.h> … … 52 52 #include <align.h> 53 53 #include <errno.h> 54 #include <trace.h> 54 55 55 56 /** This lock protects the parea_btree. */ 56 SPINLOCK_INITIALIZE(parea_lock);57 static mutex_t parea_lock; 57 58 58 59 /** B+tree with enabled physical memory areas. */ 59 60 static btree_t parea_btree; 60 61 61 /** Initialize DDI. */ 62 /** Initialize DDI. 63 * 64 */ 62 65 void ddi_init(void) 63 66 { 64 67 btree_create(&parea_btree); 68 mutex_initialize(&parea_lock, MUTEX_PASSIVE); 65 69 } 66 70 … … 72 76 void ddi_parea_register(parea_t *parea) 73 77 { 74 ipl_t ipl = interrupts_disable(); 75 spinlock_lock(&parea_lock); 78 mutex_lock(&parea_lock); 76 79 77 80 /* … … 80 83 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); 81 84 82 spinlock_unlock(&parea_lock); 83 interrupts_restore(ipl); 85 mutex_unlock(&parea_lock); 84 86 } 85 87 … … 98 100 * 99 101 */ 100 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, int flags) 102 NO_TRACE static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, size_t pages, 103 unsigned int flags) 101 104 { 102 105 ASSERT(TASK); … … 115 118 backend_data.frames = pages; 116 119 117 ipl_t ipl = interrupts_disable();118 119 120 /* Find the zone of the physical memory */ 120 spinlock_lock(&zones.lock);121 irq_spinlock_lock(&zones.lock, true); 121 122 size_t znum = find_zone(ADDR2PFN(pf), pages, 0); 122 123 … … 125 126 * -> assume it is hardware device and allow mapping 126 127 */ 127 spinlock_unlock(&zones.lock);128 irq_spinlock_unlock(&zones.lock, true); 128 129 goto map; 129 130 } … … 131 132 if (zones.info[znum].flags & ZONE_FIRMWARE) { 132 133 /* Frames are part of firmware */ 133 spinlock_unlock(&zones.lock);134 irq_spinlock_unlock(&zones.lock, true); 134 135 goto map; 135 136 } 136 137 137 138 if 
(zone_flags_available(zones.info[znum].flags)) { 138 /* Frames are part of physical memory, check if the memory 139 /* 140 * Frames are part of physical memory, check if the memory 139 141 * region is enabled for mapping. 140 142 */ 141 spinlock_unlock(&zones.lock);143 irq_spinlock_unlock(&zones.lock, true); 142 144 143 spinlock_lock(&parea_lock);145 mutex_lock(&parea_lock); 144 146 btree_node_t *nodep; 145 147 parea_t *parea = (parea_t *) btree_search(&parea_btree, 146 148 (btree_key_t) pf, &nodep); 147 149 148 if ((!parea) || (parea->frames < pages)) 150 if ((!parea) || (parea->frames < pages)) { 151 mutex_unlock(&parea_lock); 149 152 goto err; 153 } 150 154 151 spinlock_unlock(&parea_lock);155 mutex_unlock(&parea_lock); 152 156 goto map; 153 157 } 154 158 159 irq_spinlock_unlock(&zones.lock, true); 160 155 161 err: 156 spinlock_unlock(&zones.lock);157 interrupts_restore(ipl);158 162 return ENOENT; 159 163 160 164 map: 161 spinlock_lock(&TASK->lock);162 163 165 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, 164 166 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { … … 167 169 * We report it using ENOMEM. 168 170 */ 169 spinlock_unlock(&TASK->lock);170 interrupts_restore(ipl);171 171 return ENOMEM; 172 172 } … … 175 175 * Mapping is created on-demand during page fault. 176 176 */ 177 178 spinlock_unlock(&TASK->lock);179 interrupts_restore(ipl);180 177 return 0; 181 178 } … … 191 188 * 192 189 */ 193 static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) 190 NO_TRACE static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, 191 size_t size) 194 192 { 195 193 /* … … 200 198 return EPERM; 201 199 202 ipl_t ipl = interrupts_disable(); 203 spinlock_lock(&tasks_lock); 200 irq_spinlock_lock(&tasks_lock, true); 204 201 205 202 task_t *task = task_find_by_id(id); … … 211 208 * context. 
212 209 */ 213 spinlock_unlock(&tasks_lock); 214 interrupts_restore(ipl); 210 irq_spinlock_unlock(&tasks_lock, true); 215 211 return ENOENT; 216 212 } 217 213 218 214 /* Lock the task and release the lock protecting tasks_btree. */ 219 spinlock_lock(&task->lock); 220 spinlock_unlock(&tasks_lock); 215 irq_spinlock_exchange(&tasks_lock, &task->lock); 221 216 222 217 int rc = ddi_iospace_enable_arch(task, ioaddr, size); 223 218 224 spinlock_unlock(&task->lock); 225 interrupts_restore(ipl); 219 irq_spinlock_unlock(&task->lock, true); 226 220 227 221 return rc; … … 264 258 } 265 259 266 /** Disable or enable preemption.267 *268 * @param enable If non-zero, the preemption counter will be decremented,269 * leading to potential enabling of preemption. Otherwise270 * the preemption counter will be incremented, preventing271 * preemption from occurring.272 *273 * @return Zero on success or EPERM if callers capabilities are not sufficient.274 *275 */276 unative_t sys_preempt_control(int enable)277 {278 if (!(cap_get(TASK) & CAP_PREEMPT_CONTROL))279 return EPERM;280 281 if (enable)282 preemption_enable();283 else284 preemption_disable();285 286 return 0;287 }288 289 /** Disable or enable specified interrupts.290 *291 * @param irq the interrupt to be enabled/disabled.292 * @param enable if true enable the interrupt, disable otherwise.293 *294 * @retutn Zero on success, error code otherwise.295 */296 unative_t sys_interrupt_enable(int irq, int enable)297 {298 cap_t task_cap = cap_get(TASK);299 if (!(task_cap & CAP_PREEMPT_CONTROL) || !(task_cap & CAP_IRQ_REG))300 return EPERM;301 302 if (irq < 0 || irq > 16) {303 return EINVAL;304 }305 306 uint16_t irq_mask = (uint16_t)(1 << irq);307 if (enable) {308 trap_virtual_enable_irqs(irq_mask);309 } else {310 trap_virtual_disable_irqs(irq_mask);311 }312 313 return 0;314 }315 316 260 /** @} 317 261 */
Note: See TracChangeset for help on using the changeset viewer.