Changeset f8ddd17 in mainline
- Timestamp:
- 2006-12-09T20:20:50Z (18 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- b82a13c
- Parents:
- 9ab9c2ec
- Files:
-
- 1 added
- 36 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/ia32/src/drivers/ega.c
r9ab9c2ec rf8ddd17 47 47 #include <console/console.h> 48 48 #include <sysinfo/sysinfo.h> 49 #include <ddi/ddi.h> 49 50 50 51 /* … … 52 53 * Simple and short. Function for displaying characters and "scrolling". 53 54 */ 55 56 static parea_t ega_parea; /**< Physical memory area for EGA video RAM. */ 54 57 55 58 SPINLOCK_INITIALIZE(egalock); … … 80 83 stdout = &ega_console; 81 84 85 ega_parea.pbase = VIDEORAM; 86 ega_parea.vbase = (uintptr_t) videoram; 87 ega_parea.frames = 1; 88 ega_parea.cacheable = false; 89 ddi_parea_register(&ega_parea); 90 82 91 sysinfo_set_item_val("fb", NULL, true); 83 92 sysinfo_set_item_val("fb.kind", NULL, 2); … … 85 94 sysinfo_set_item_val("fb.height", NULL, ROWS); 86 95 sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM); 96 sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t) 97 videoram)); 87 98 88 99 #ifndef CONFIG_FB -
kernel/arch/sparc64/include/interrupt.h
r9ab9c2ec rf8ddd17 48 48 49 49 enum { 50 IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI, 51 IPI_DCACHE_SHOOTDOWN 50 IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI 52 51 }; 53 52 -
kernel/arch/sparc64/include/mm/cache.h
r9ab9c2ec rf8ddd17 36 36 #define KERN_sparc64_CACHE_H_ 37 37 38 #ifdef CONFIG_SMP39 extern void dcache_shootdown_start(void);40 extern void dcache_shootdown_finalize(void);41 extern void dcache_shootdown_ipi_recv(void);42 #else /* CONFIG_SMP */43 #define dcache_shootdown_start();44 #define dcache_shootdown_finalize();45 #define dcache_shootdown_ipi_recv();46 #endif /* CONFIG_SMP */47 48 38 extern void dcache_flush(void); 49 39 -
kernel/arch/sparc64/src/mm/as.c
r9ab9c2ec rf8ddd17 49 49 #include <macros.h> 50 50 #endif /* CONFIG_TSB */ 51 52 #ifdef CONFIG_VIRT_IDX_DCACHE53 #include <arch/mm/cache.h>54 #endif /* CONFIG_VIRT_IDX_DCACHE */55 51 56 52 /** Architecture dependent address space init. */ … … 163 159 dtsb_base_write(tsb_base.value); 164 160 #endif 165 #ifdef CONFIG_VIRT_IDX_DCACHE166 if (as->dcache_flush_on_install) {167 /*168 * Some mappings in this address space are illegal address169 * aliases. Upon their creation, the dcache_flush_on_install170 * flag was set.171 *172 * We are now obliged to flush the D-cache in order to guarantee173 * that there will be at most one cache line for each address174 * alias.175 *176 * This flush performs a cleanup after another address space in177 * which the alias might have existed.178 */179 dcache_flush();180 }181 #endif /* CONFIG_VIRT_IDX_DCACHE */182 161 } 183 162 … … 214 193 } 215 194 #endif 216 #ifdef CONFIG_VIRT_IDX_DCACHE217 if (as->dcache_flush_on_deinstall) {218 /*219 * Some mappings in this address space are illegal address220 * aliases. Upon their creation, the dcache_flush_on_deinstall221 * flag was set.222 *223 * We are now obliged to flush the D-cache in order to guarantee224 * that there will be at most one cache line for each address225 * alias.226 *227 * This flush performs a cleanup after this address space. It is228 * necessary because other address spaces that contain the same229 * alias are not necessarily aware of the need to carry out the230 * cache flush. The only address spaces that are aware of it are231 * those that created the illegal alias.232 */233 dcache_flush();234 }235 #endif /* CONFIG_VIRT_IDX_DCACHE */236 195 } 237 196 -
kernel/arch/sparc64/src/mm/cache.c
r9ab9c2ec rf8ddd17 32 32 /** 33 33 * @file 34 * @brief D-cache shootdown algorithm.35 34 */ 36 35 37 36 #include <arch/mm/cache.h> 38 37 39 #ifdef CONFIG_SMP40 41 #include <smp/ipi.h>42 #include <arch/interrupt.h>43 #include <synch/spinlock.h>44 #include <arch.h>45 #include <debug.h>46 47 /**48 * This spinlock is used by the processors to synchronize during the D-cache49 * shootdown.50 */51 SPINLOCK_INITIALIZE(dcachelock);52 53 /** Initialize the D-cache shootdown sequence.54 *55 * Start the shootdown sequence by sending out an IPI and wait until all56 * processors spin on the dcachelock spinlock.57 */58 void dcache_shootdown_start(void)59 {60 int i;61 62 CPU->arch.dcache_active = 0;63 spinlock_lock(&dcachelock);64 65 ipi_broadcast(IPI_DCACHE_SHOOTDOWN);66 67 busy_wait:68 for (i = 0; i < config.cpu_count; i++)69 if (cpus[i].arch.dcache_active)70 goto busy_wait;71 }72 73 /** Finish the D-cache shootdown sequence. */74 void dcache_shootdown_finalize(void)75 {76 spinlock_unlock(&dcachelock);77 CPU->arch.dcache_active = 1;78 }79 80 /** Process the D-cache shootdown IPI. */81 void dcache_shootdown_ipi_recv(void)82 {83 ASSERT(CPU);84 85 CPU->arch.dcache_active = 0;86 spinlock_lock(&dcachelock);87 spinlock_unlock(&dcachelock);88 89 dcache_flush();90 91 CPU->arch.dcache_active = 1;92 }93 94 #endif /* CONFIG_SMP */95 96 38 /** @} 97 39 */ -
kernel/arch/sparc64/src/mm/page.c
r9ab9c2ec rf8ddd17 74 74 for (i = 0; i < bsp_locked_dtlb_entries; i++) { 75 75 dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, 76 bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code, 77 true, false); 76 bsp_locked_dtlb_entry[i].phys_page, 77 bsp_locked_dtlb_entry[i].pagesize_code, true, 78 false); 78 79 } 79 80 #endif … … 152 153 * Second, save the information about the mapping for APs. 153 154 */ 154 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment; 155 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment; 156 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code; 155 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = 156 virtaddr + i*sizemap[order].increment; 157 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = 158 physaddr + i*sizemap[order].increment; 159 bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = 160 sizemap[order].pagesize_code; 157 161 bsp_locked_dtlb_entries++; 158 162 #endif -
kernel/arch/sparc64/src/smp/ipi.c
r9ab9c2ec rf8ddd17 39 39 #include <config.h> 40 40 #include <mm/tlb.h> 41 #include <arch/mm/cache.h>42 41 #include <arch/interrupt.h> 43 42 #include <arch/trap/interrupt.h> … … 122 121 func = tlb_shootdown_ipi_recv; 123 122 break; 124 case IPI_DCACHE_SHOOTDOWN:125 func = dcache_shootdown_ipi_recv;126 break;127 123 default: 128 124 panic("Unknown IPI (%d).\n", ipi); -
kernel/arch/sparc64/src/trap/interrupt.c
r9ab9c2ec rf8ddd17 45 45 #include <arch.h> 46 46 #include <mm/tlb.h> 47 #include <arch/mm/cache.h>48 47 #include <config.h> 49 48 #include <synch/spinlock.h> … … 92 91 if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) { 93 92 tlb_shootdown_ipi_recv(); 94 } else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {95 dcache_shootdown_ipi_recv();96 93 } 97 94 #endif -
kernel/genarch/src/fb/fb.c
r9ab9c2ec rf8ddd17 46 46 #include <bitops.h> 47 47 #include <print.h> 48 #include <ddi/ddi.h> 48 49 49 50 #include "helenos.xbm" 51 52 static parea_t fb_parea; /**< Physical memory area for fb. */ 50 53 51 54 SPINLOCK_INITIALIZE(fb_lock); … … 435 438 columns = x / COL_WIDTH; 436 439 440 fb_parea.pbase = (uintptr_t) addr; 441 fb_parea.vbase = (uintptr_t) fbaddress; 442 fb_parea.frames = SIZE2FRAMES(fbsize); 443 fb_parea.cacheable = false; 444 ddi_parea_register(&fb_parea); 445 437 446 sysinfo_set_item_val("fb", NULL, true); 438 447 sysinfo_set_item_val("fb.kind", NULL, 1); … … 442 451 sysinfo_set_item_val("fb.visual", NULL, visual); 443 452 sysinfo_set_item_val("fb.address.physical", NULL, addr); 453 sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t) 454 fbaddress)); 444 455 sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors); 445 456 -
kernel/generic/include/ddi/ddi.h
r9ab9c2ec rf8ddd17 40 40 #include <typedefs.h> 41 41 42 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages, 43 unative_t flags); 42 /** Structure representing contiguous physical memory area. */ 43 typedef struct { 44 uintptr_t pbase; /**< Physical base of the area. */ 45 uintptr_t vbase; /**< Virtual base of the area. */ 46 count_t frames; /**< Number of frames in the area. */ 47 bool cacheable; /**< Cacheability. */ 48 } parea_t; 49 50 extern void ddi_init(void); 51 extern void ddi_parea_register(parea_t *parea); 52 53 extern unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, 54 unative_t pages, unative_t flags); 44 55 extern unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg); 45 56 extern unative_t sys_preempt_control(int enable); -
kernel/generic/include/mm/as.h
r9ab9c2ec rf8ddd17 95 95 asid_t asid; 96 96 97 #ifdef CONFIG_VIRT_IDX_DCACHE98 bool dcache_flush_on_install;99 bool dcache_flush_on_deinstall;100 #endif /* CONFIG_VIRT_IDX_DCACHE */101 102 97 /** Architecture specific content. */ 103 98 as_arch_t arch; … … 166 161 /** Data to be used by the backend. */ 167 162 mem_backend_data_t backend_data; 168 169 /**170 * Virtual color of the original address space area that was at the beginning171 * of the share chain.172 */173 int orig_color;174 163 }; 175 164 -
kernel/generic/include/mm/page.h
r9ab9c2ec rf8ddd17 71 71 * Macro for computing page color. 72 72 */ 73 #define PAGE_COLOR(va) 73 #define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) 74 74 75 75 /** Page fault access type. */ … … 83 83 /** Operations to manipulate page mappings. */ 84 84 struct page_mapping_operations { 85 void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int flags); 85 void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int 86 flags); 86 87 void (* mapping_remove)(as_t *as, uintptr_t page); 87 88 pte_t *(* mapping_find)(as_t *as, uintptr_t page); … … 94 95 extern void page_table_lock(as_t *as, bool lock); 95 96 extern void page_table_unlock(as_t *as, bool unlock); 96 extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags); 97 extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int 98 flags); 97 99 extern void page_mapping_remove(as_t *as, uintptr_t page); 98 100 extern pte_t *page_mapping_find(as_t *as, uintptr_t page); -
kernel/generic/src/console/klog.c
r9ab9c2ec rf8ddd17 39 39 #include <ddi/device.h> 40 40 #include <ddi/irq.h> 41 #include <ddi/ddi.h> 41 42 #include <ipc/irq.h> 42 43 44 /** Physical memory area used for klog. */ 45 static parea_t klog_parea; 46 43 47 /* 44 48 * For now, we use 0 as INR. 45 * However, on some architectures 0 is the clock interrupt (e.g. amd64 and ia32).46 * It is therefore desirable to have architecture specific definition of KLOG_VIRT_INR47 * in the future.49 * However, on some architectures 0 is the clock interrupt (e.g. amd64 and 50 * ia32). It is therefore desirable to have architecture specific definition of 51 * KLOG_VIRT_INR in the future. 48 52 */ 49 53 #define KLOG_VIRT_INR 0 … … 76 80 if (!faddr) 77 81 panic("Cannot allocate page for klog"); 78 klog = (char *) PA2KA(faddr);82 klog = (char *) PA2KA(faddr); 79 83 80 84 devno_t devno = device_assign_devno(); 81 85 82 sysinfo_set_item_val("klog.faddr", NULL, (unative_t)faddr); 86 klog_parea.pbase = (uintptr_t) faddr; 87 klog_parea.vbase = (uintptr_t) klog; 88 klog_parea.frames = 1 << KLOG_ORDER; 89 klog_parea.cacheable = true; 90 ddi_parea_register(&klog_parea); 91 92 sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr); 93 sysinfo_set_item_val("klog.fcolor", NULL, (unative_t) 94 PAGE_COLOR((uintptr_t) klog)); 83 95 sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER); 84 96 sysinfo_set_item_val("klog.devno", NULL, devno); -
kernel/generic/src/ddi/ddi.c
r9ab9c2ec rf8ddd17 48 48 #include <synch/spinlock.h> 49 49 #include <syscall/copy.h> 50 #include <adt/btree.h> 50 51 #include <arch.h> 51 52 #include <align.h> 52 53 #include <errno.h> 53 54 55 /** This lock protects the parea_btree. */ 56 SPINLOCK_INITIALIZE(parea_lock); 57 58 /** B+tree with enabled physical memory areas. */ 59 static btree_t parea_btree; 60 61 /** Initialize DDI. */ 62 void ddi_init(void) 63 { 64 btree_create(&parea_btree); 65 } 66 67 /** Enable piece of physical memory for mapping by physmem_map(). 68 * 69 * @param parea Pointer to physical area structure. 70 * 71 * @todo This function doesn't check for overlaps. It depends on the kernel to 72 * create disjunct physical memory areas. 73 */ 74 void ddi_parea_register(parea_t *parea) 75 { 76 ipl_t ipl; 77 78 ipl = interrupts_disable(); 79 spinlock_lock(&parea_lock); 80 81 /* 82 * TODO: we should really check for overlaps here. 83 * However, we should be safe because the kernel is pretty sane and 84 * memory of different devices doesn't overlap. 85 */ 86 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); 87 88 spinlock_unlock(&parea_lock); 89 interrupts_restore(ipl); 90 } 91 54 92 /** Map piece of physical memory into virtual address space of current task. 55 93 * 56 * @param pf Physical frameaddress of the starting frame.57 * @param vp Virtual pageaddress of the starting page.94 * @param pf Physical address of the starting frame. 95 * @param vp Virtual address of the starting page. 58 96 * @param pages Number of pages to map. 59 97 * @param flags Address space area flags for the mapping. 60 98 * 61 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall, 62 * ENOENT if there is no task matching the specified ID and ENOMEM if 63 * there was a problem in creating address space area. 
99 * @return 0 on success, EPERM if the caller lacks capabilities to use this 100 * syscall, ENOENT if there is no task matching the specified ID or the 101 * physical address space is not enabled for mapping and ENOMEM if there 102 * was a problem in creating address space area. ENOTSUP is returned when 103 * an attempt to create an illegal address alias is detected. 64 104 */ 65 105 static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags) … … 80 120 81 121 ipl = interrupts_disable(); 122 123 /* 124 * Check if the physical memory area is enabled for mapping. 125 * If the architecture supports virtually indexed caches, intercept 126 * attempts to create an illegal address alias. 127 */ 128 spinlock_lock(&parea_lock); 129 parea_t *parea; 130 btree_node_t *nodep; 131 parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep); 132 if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) && 133 !parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) && 134 parea->cacheable)) { 135 /* 136 * This physical memory area cannot be mapped. 137 */ 138 spinlock_unlock(&parea_lock); 139 interrupts_restore(ipl); 140 return ENOENT; 141 } 142 143 #ifdef CONFIG_VIRT_IDX_DCACHE 144 if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) { 145 /* 146 * Refuse to create an illegal address alias. 147 */ 148 spinlock_unlock(&parea_lock); 149 interrupts_restore(ipl); 150 return ENOTSUP; 151 } 152 #endif /* CONFIG_VIRT_IDX_DCACHE */ 153 154 spinlock_unlock(&parea_lock); 155 82 156 spinlock_lock(&TASK->lock); 83 157 … … 108 182 * @param size Size of the enabled I/O space.. 109 183 * 110 * @return 0 on success, EPERM if the caller lacks capabilities to use this syscall,111 * 184 * @return 0 on success, EPERM if the caller lacks capabilities to use this 185 * syscall, ENOENT if there is no task matching the specified ID. 
112 186 */ 113 187 static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) … … 161 235 * @return 0 on success, otherwise it returns error code found in errno.h 162 236 */ 163 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages,164 165 { 166 return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, FRAME_SIZE),167 ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), (count_t) pages,168 237 unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t 238 pages, unative_t flags) 239 { 240 return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, 241 FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), 242 (count_t) pages, (int) flags); 169 243 } 170 244 … … 184 258 return (unative_t) rc; 185 259 186 return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, (uintptr_t) arg.ioaddr, (size_t) arg.size); 260 return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, 261 (uintptr_t) arg.ioaddr, (size_t) arg.size); 187 262 } 188 263 189 264 /** Disable or enable preemption. 190 265 * 191 * @param enable If non-zero, the preemption counter will be decremented, leading to potential192 * enabling of preemption. Otherwise the preemption counter will be incremented,193 * 266 * @param enable If non-zero, the preemption counter will be decremented, 267 * leading to potential enabling of preemption. Otherwise the preemption 268 * counter will be incremented, preventing preemption from occurring. 194 269 * 195 270 * @return Zero on success or EPERM if callers capabilities are not sufficient. -
kernel/generic/src/lib/rd.c
r9ab9c2ec rf8ddd17 42 42 #include <mm/frame.h> 43 43 #include <sysinfo/sysinfo.h> 44 #include <ddi/ddi.h> 45 46 static parea_t rd_parea; /**< Physical memory area for rd. */ 44 47 45 48 int init_rd(rd_header * header, size_t size) 46 49 { 47 50 /* Identify RAM disk */ 48 if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) 51 if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || 52 (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) 49 53 return RE_INVALID; 50 54 … … 77 81 dsize = size - hsize; 78 82 83 rd_parea.pbase = KA2PA((void *) header + hsize); 84 rd_parea.vbase = (uintptr_t) ((void *) header + hsize); 85 rd_parea.frames = SIZE2FRAMES(dsize); 86 rd_parea.cacheable = true; 87 ddi_parea_register(&rd_parea); 88 79 89 sysinfo_set_item_val("rd", NULL, true); 80 90 sysinfo_set_item_val("rd.size", NULL, dsize); 81 sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) KA2PA((void *) header + hsize)); 91 sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) 92 KA2PA((void *) header + hsize)); 93 sysinfo_set_item_val("rd.address.color", NULL, (unative_t) 94 PAGE_COLOR((uintptr_t) header + hsize)); 82 95 83 96 return RE_OK; -
kernel/generic/src/main/main.c
r9ab9c2ec rf8ddd17 81 81 #include <console/klog.h> 82 82 #include <smp/smp.h> 83 #include <ddi/ddi.h> 83 84 84 85 /** Global configuration structure. */ … … 103 104 * appropriate sizes and addresses. 104 105 */ 105 uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel is loaded. */ 106 size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. */ 107 size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. */ 108 109 uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address */ 106 uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel 107 * is loaded. */ 108 size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. 109 */ 110 size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. 111 */ 112 uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address. 113 */ 110 114 111 115 void main_bsp(void); … … 142 146 config.memory_size = get_memory_size(); 143 147 144 config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE); 148 config.kernel_size = ALIGN_UP(hardcoded_ktext_size + 149 hardcoded_kdata_size, PAGE_SIZE); 145 150 config.stack_size = CONFIG_STACK_SIZE; 146 151 … … 151 156 count_t i; 152 157 for (i = 0; i < init.cnt; i++) { 153 if (PA_overlaps(config.stack_base, config.stack_size, init.tasks[i].addr, init.tasks[i].size)) 154 config.stack_base = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, config.stack_size); 158 if (PA_overlaps(config.stack_base, config.stack_size, 159 init.tasks[i].addr, init.tasks[i].size)) 160 config.stack_base = ALIGN_UP(init.tasks[i].addr + 161 init.tasks[i].size, config.stack_size); 155 162 } 156 163 157 164 /* Avoid placing stack on top of boot allocations. 
*/ 158 165 if (ballocs.size) { 159 if (PA_overlaps(config.stack_base, config.stack_size, ballocs.base, ballocs.size)) 160 config.stack_base = ALIGN_UP(ballocs.base + ballocs.size, PAGE_SIZE); 166 if (PA_overlaps(config.stack_base, config.stack_size, 167 ballocs.base, ballocs.size)) 168 config.stack_base = ALIGN_UP(ballocs.base + 169 ballocs.size, PAGE_SIZE); 161 170 } 162 171 … … 165 174 166 175 context_save(&ctx); 167 context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, THREAD_STACK_SIZE); 176 context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, 177 THREAD_STACK_SIZE); 168 178 context_restore(&ctx); 169 179 /* not reached */ … … 201 211 */ 202 212 arch_pre_mm_init(); 203 frame_init(); /* Initialize at least 1 memory segment big enough for slab to work */ 213 frame_init(); 214 /* Initialize at least 1 memory segment big enough for slab to work. */ 204 215 slab_cache_init(); 205 216 btree_init(); … … 207 218 page_init(); 208 219 tlb_init(); 220 ddi_init(); 209 221 arch_post_mm_init(); 210 222 211 223 version_print(); 212 printf("kernel: %.*p hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10); 213 printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, config.stack_base, config.stack_size >> 10); 224 printf("kernel: %.*p hardcoded_ktext_size=%zdK, " 225 "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, 226 config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 227 10); 228 printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, 229 config.stack_base, config.stack_size >> 10); 214 230 215 231 arch_pre_smp_init(); 216 232 smp_init(); 217 218 slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */233 /* Slab must be initialized after we know the number of processors. 
*/ 234 slab_enable_cpucache(); 219 235 220 236 printf("config.memory_size=%zdM\n", config.memory_size >> 20); … … 233 249 if (init.cnt > 0) { 234 250 for (i = 0; i < init.cnt; i++) 235 printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(uintptr_t) * 2, init.tasks[i].addr, i, init.tasks[i].size); 251 printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, 252 sizeof(uintptr_t) * 2, init.tasks[i].addr, i, 253 init.tasks[i].size); 236 254 } else 237 255 printf("No init binaries found\n"); … … 305 323 * switch to this cpu's private stack prior to waking kmp up. 306 324 */ 307 context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE); 325 context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), 326 (uintptr_t) CPU->stack, CPU_STACK_SIZE); 308 327 context_restore(&CPU->saved_context); 309 328 /* not reached */ -
kernel/generic/src/mm/as.c
r9ab9c2ec rf8ddd17 167 167 as->page_table = page_table_create(flags); 168 168 169 #ifdef CONFIG_VIRT_IDX_DCACHE170 as->dcache_flush_on_install = false;171 as->dcache_flush_on_deinstall = false;172 #endif /* CONFIG_VIRT_IDX_DCACHE */173 174 169 return as; 175 170 } … … 278 273 else 279 274 memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0); 280 281 #ifdef CONFIG_VIRT_IDX_DCACHE282 /*283 * When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the284 * orig_color is probably wrong until the flag is reset. In other words, it is285 * initialized with the color of the area being created and not with the color286 * of the original address space area at the beginning of the share chain. Of287 * course, the correct color is set by as_area_share() before the flag is288 * reset.289 */290 a->orig_color = PAGE_COLOR(base);291 #endif /* CONFIG_VIRT_IDX_DCACHE */292 275 293 276 btree_create(&a->used_space); … … 576 559 * or ENOMEM if there was a problem in allocating destination address space 577 560 * area. ENOTSUP is returned if the address space area backend does not support 578 * sharing. 561 * sharing or if the kernel detects an attempt to create an illegal address 562 * alias. 
579 563 */ 580 564 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, … … 584 568 int src_flags; 585 569 size_t src_size; 586 int src_orig_color;587 570 as_area_t *src_area, *dst_area; 588 571 share_info_t *sh_info; … … 601 584 return ENOENT; 602 585 } 603 604 586 605 587 if (!src_area->backend || !src_area->backend->share) { … … 618 600 src_backend = src_area->backend; 619 601 src_backend_data = src_area->backend_data; 620 src_orig_color = src_area->orig_color;621 602 622 603 /* Share the cacheable flag from the original mapping */ … … 630 611 return EPERM; 631 612 } 613 614 #ifdef CONFIG_VIRT_IDX_DCACHE 615 if (!(dst_flags_mask & AS_AREA_EXEC)) { 616 if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) { 617 /* 618 * Refuse to create an illegal address alias. 619 */ 620 mutex_unlock(&src_area->lock); 621 mutex_unlock(&src_as->lock); 622 interrupts_restore(ipl); 623 return ENOTSUP; 624 } 625 } 626 #endif /* CONFIG_VIRT_IDX_DCACHE */ 632 627 633 628 /* … … 683 678 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 684 679 dst_area->sh_info = sh_info; 685 dst_area->orig_color = src_orig_color;686 #ifdef CONFIG_VIRT_IDX_DCACHE687 if (src_orig_color != PAGE_COLOR(dst_base)) {688 /*689 * We have just detected an attempt to create an invalid address690 * alias. We allow this and set a special flag that tells the691 * architecture specific code to flush the D-cache when the692 * offending address space is installed and deinstalled693 * (cleanup).694 *695 * In order for the flags to take effect immediately, we also696 * perform a global D-cache shootdown.697 */698 dcache_shootdown_start();699 dst_as->dcache_flush_on_install = true;700 dst_as->dcache_flush_on_deinstall = true;701 dcache_flush();702 dcache_shootdown_finalize();703 }704 #endif /* CONFIG_VIRT_IDX_DCACHE */705 680 mutex_unlock(&dst_area->lock); 706 681 mutex_unlock(&dst_as->lock); -
kernel/generic/src/sysinfo/sysinfo.c
r9ab9c2ec rf8ddd17 231 231 232 232 switch (root->val_type) { 233 234 235 236 237 238 239 240 241 242 243 244 233 case SYSINFO_VAL_UNDEFINED: 234 val = 0; 235 vtype = "UND"; 236 break; 237 case SYSINFO_VAL_VAL: 238 val = root->val.val; 239 vtype = "VAL"; 240 break; 241 case SYSINFO_VAL_FUNCTION: 242 val = ((sysinfo_val_fn_t) (root->val.fn)) (root); 243 vtype = "FUN"; 244 break; 245 245 } 246 246 247 printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? "TAB" : "FUN")); 247 printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, 248 val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? 249 "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? 250 "TAB" : "FUN")); 248 251 249 252 if (root->subinfo_type == SYSINFO_SUBINFO_TABLE) -
kernel/generic/src/time/clock.c
r9ab9c2ec rf8ddd17 55 55 #include <sysinfo/sysinfo.h> 56 56 #include <arch/barrier.h> 57 #include <mm/frame.h> 58 #include <ddi/ddi.h> 59 60 /** Physical memory area of the real time clock. */ 61 static parea_t clock_parea; 57 62 58 63 /* Pointers to public variables with time */ … … 73 78 * information about realtime data. We allocate 1 page with these 74 79 * data and update it periodically. 75 *76 *77 80 */ 78 81 void clock_counter_init(void) … … 80 83 void *faddr; 81 84 82 faddr = frame_alloc( 0, FRAME_ATOMIC);85 faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC); 83 86 if (!faddr) 84 87 panic("Cannot allocate page for clock"); 85 88 86 public_time = (struct ptime *) PA2KA(faddr);89 public_time = (struct ptime *) PA2KA(faddr); 87 90 88 91 /* TODO: We would need some arch dependent settings here */ … … 91 94 public_time->useconds = 0; 92 95 93 sysinfo_set_item_val("clock.faddr", NULL, (unative_t)faddr); 96 clock_parea.pbase = (uintptr_t) faddr; 97 clock_parea.vbase = (uintptr_t) public_time; 98 clock_parea.frames = 1; 99 clock_parea.cacheable = true; 100 ddi_parea_register(&clock_parea); 101 102 /* 103 * Prepare information for the userspace so that it can successfully 104 * physmem_map() the clock_parea. 105 */ 106 sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true); 107 sysinfo_set_item_val("clock.fcolor", NULL, (unative_t) 108 PAGE_COLOR(clock_parea.vbase)); 109 sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr); 94 110 } 95 111 -
uspace/fb/ega.c
r9ab9c2ec rf8ddd17 35 35 */ 36 36 37 38 37 #include <stdlib.h> 39 38 #include <unistd.h> … … 63 62 saved_screen saved_screens[MAX_SAVED_SCREENS]; 64 63 65 66 64 #define EGA_IO_ADDRESS 0x3d4 67 65 #define EGA_IO_SIZE 2 … … 127 125 int i; 128 126 if (rows > 0) { 129 memcpy (scr_addr,((char *)scr_addr) + rows * scr_width * 2, scr_width * scr_height * 2 - rows * scr_width * 2); 127 memcpy (scr_addr,((char *)scr_addr) + rows * scr_width * 2, 128 scr_width * scr_height * 2 - rows * scr_width * 2); 130 129 for (i = 0; i < rows * scr_width ; i ++) 131 (((short *)scr_addr) + scr_width * scr_height - rows * scr_width) [i] = ((style << 8) + ' '); 130 (((short *)scr_addr) + scr_width * scr_height - rows * 131 scr_width) [i] = ((style << 8) + ' '); 132 132 } else if (rows < 0) { 133 134 memcpy (((char *)scr_addr) - rows * scr_width * 2 ,scr_addr ,scr_width * scr_height * 2 + rows * scr_width * 2);133 memcpy (((char *)scr_addr) - rows * scr_width * 2, scr_addr, 134 scr_width * scr_height * 2 + rows * scr_width * 2); 135 135 for (i = 0; i < - rows * scr_width ; i++) 136 136 ((short *)scr_addr) [i] = ((style << 8 ) + ' '); … … 309 309 scr_width=sysinfo_value("fb.width"); 310 310 scr_height=sysinfo_value("fb.height"); 311 iospace_enable(task_get_id(),(void *)EGA_IO_ADDRESS,2); 312 313 sz = scr_width*scr_height*2; 314 scr_addr = as_get_mappable_page(sz); 315 316 physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >> PAGE_WIDTH, 317 AS_AREA_READ | AS_AREA_WRITE); 311 iospace_enable(task_get_id(), (void *) EGA_IO_ADDRESS, 2); 312 313 sz = scr_width * scr_height * 2; 314 scr_addr = as_get_mappable_page(sz, (int) 315 sysinfo_value("fb.address.color")); 316 317 physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >> 318 PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE); 318 319 319 320 async_set_client_connection(ega_client_connection); -
uspace/fb/fb.c
r9ab9c2ec rf8ddd17 705 705 /* We accept one area for data interchange */ 706 706 if (IPC_GET_ARG1(*call) == shm_id) { 707 void *dest = as_get_mappable_page(IPC_GET_ARG2(*call)); 707 void *dest = as_get_mappable_page(IPC_GET_ARG2(*call), 708 PAGE_COLOR(IPC_GET_ARG1(*call))); 708 709 shm_size = IPC_GET_ARG2(*call); 709 if (!ipc_answer_fast(callid, 0, (sysarg_t) dest, 0))710 if (!ipc_answer_fast(callid, 0, (sysarg_t) dest, 0)) 710 711 shm = dest; 711 712 else … … 717 718 } else { 718 719 intersize = IPC_GET_ARG2(*call); 719 receive_comm_area(callid, call,(void *)&interbuffer);720 receive_comm_area(callid, call, (void *) &interbuffer); 720 721 } 721 722 return 1; … … 1283 1284 1284 1285 asz = fb_scanline * fb_height; 1285 fb_addr = as_get_mappable_page(asz );1286 fb_addr = as_get_mappable_page(asz, (int) sysinfo_value("fb.address.color")); 1286 1287 1287 1288 physmem_map(fb_ph_addr, fb_addr, ALIGN_UP(asz, PAGE_SIZE) >> PAGE_WIDTH, 1288 1289 AS_AREA_READ | AS_AREA_WRITE); 1289 1290 1290 if (screen_init(fb_addr, fb_width, fb_height, fb_scanline, fb_visual, fb_invert_colors)) 1291 if (screen_init(fb_addr, fb_width, fb_height, fb_scanline, fb_visual, 1292 fb_invert_colors)) 1291 1293 return 0; 1292 1294 -
uspace/fb/main.c
r9ab9c2ec rf8ddd17 44 44 void *dest; 45 45 46 dest = as_get_mappable_page(IPC_GET_ARG2(*call)); 46 dest = as_get_mappable_page(IPC_GET_ARG2(*call), 47 PAGE_COLOR(IPC_GET_ARG1(*call))); 47 48 if (ipc_answer_fast(callid, 0, (sysarg_t)dest, 0) == 0) { 48 49 if (*area) -
uspace/klog/klog.c
r9ab9c2ec rf8ddd17 64 64 printf("Kernel console output.\n"); 65 65 66 mapping = as_get_mappable_page(PAGE_SIZE );66 mapping = as_get_mappable_page(PAGE_SIZE, sysinfo_value("klog.fcolor")); 67 67 res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, 68 (sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_KLOG,69 NULL, NULL,NULL);68 (sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_KLOG, 69 NULL, NULL, NULL); 70 70 if (res) { 71 71 printf("Failed to initialize klog memarea\n"); -
uspace/libc/arch/amd64/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 12 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/ia32/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 12 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/ia64/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 14 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/mips32/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 14 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/ppc32/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 12 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/ppc64/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 12 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 0 /* dummy */ 40 41 41 42 #endif -
uspace/libc/arch/sparc64/include/config.h
r9ab9c2ec rf8ddd17 38 38 #define PAGE_WIDTH 13 39 39 #define PAGE_SIZE (1<<PAGE_WIDTH) 40 #define PAGE_COLOR_BITS 1 /**< Bit 13 is the page color. */ 40 41 41 42 #endif -
uspace/libc/generic/as.c
r9ab9c2ec rf8ddd17 38 38 #include <align.h> 39 39 #include <types.h> 40 #include <bitops.h> 40 41 41 42 /** … … 54 55 void *as_area_create(void *address, size_t size, int flags) 55 56 { 56 return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address, (sysarg_t) size, (sysarg_t) flags); 57 return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address, 58 (sysarg_t) size, (sysarg_t) flags); 57 59 } 58 60 59 61 /** Resize address space area. 60 62 * 61 * @param address Virtual address pointing into already existing address space area. 63 * @param address Virtual address pointing into already existing address space 64 * area. 62 65 * @param size New requested size of the area. 63 66 * @param flags Currently unused. … … 67 70 int as_area_resize(void *address, size_t size, int flags) 68 71 { 69 return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t) size, (sysarg_t) flags); 72 return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t) 73 size, (sysarg_t) flags); 70 74 } 71 75 72 76 /** Destroy address space area. 73 77 * 74 * @param address Virtual address pointing into the address space area being destroyed. 78 * @param address Virtual address pointing into the address space area being 79 * destroyed. 75 80 * 76 81 * @return Zero on success or a code from @ref errno.h on failure. … … 134 139 /* Return pointer to area not managed by sbrk */ 135 140 return ((void *) &_heap + maxheapsize); 136 137 141 } 138 142 139 143 /** Return pointer to some unmapped area, where fits new as_area 140 144 * 145 * @param sz Requested size of the allocation. 146 * @param color Requested virtual color of the allocation. 147 * 148 * @return Pointer to the beginning 149 * 141 150 * TODO: make some first_fit/...
algorithm, we are now just incrementing 142 151 * the pointer to last area 143 152 */ 144 void * as_get_mappable_page(size_t sz) 153 #include <stdio.h> 154 void *as_get_mappable_page(size_t sz, int color) 145 155 { 146 156 void *res; 157 uint64_t asz; 158 int i; 159 160 if (!sz) 161 return NULL; 162 163 asz = 1 << (fnzb64(sz - 1) + 1); 147 164 148 165 /* Set heapsize to some meaningful value */ … … 150 167 set_maxheapsize(MAX_HEAP_SIZE); 151 168 152 if (!last_allocated) 153 last_allocated = (void *) ALIGN_UP((void *) &_heap + maxheapsize, PAGE_SIZE); 154 155 sz = ALIGN_UP(sz, PAGE_SIZE); 169 /* 170 * Make sure we allocate from naturally aligned address and a page of 171 * appropriate color. 172 */ 173 i = 0; 174 do { 175 if (!last_allocated) { 176 last_allocated = (void *) ALIGN_UP((void *) &_heap + 177 maxheapsize, asz); 178 } else { 179 last_allocated = (void *) ALIGN_UP(((uintptr_t) 180 last_allocated) + (int) (i > 0), asz); 181 } 182 } while ((asz < (1 << (PAGE_COLOR_BITS + PAGE_WIDTH))) && 183 (PAGE_COLOR((uintptr_t) last_allocated) != color) && 184 (++i < (1 << PAGE_COLOR_BITS))); 185 156 186 res = last_allocated; 157 last_allocated += sz;187 last_allocated += ALIGN_UP(sz, PAGE_SIZE); 158 188 159 189 return res; -
uspace/libc/generic/mman.c
r9ab9c2ec rf8ddd17 40 40 { 41 41 if (!start) 42 start = as_get_mappable_page(length );42 start = as_get_mappable_page(length, 0); 43 43 44 44 // if (! ((flags & MAP_SHARED) ^ (flags & MAP_PRIVATE))) -
uspace/libc/generic/time.c
r9ab9c2ec rf8ddd17 41 41 #include <atomic.h> 42 42 #include <futex.h> 43 #include <sysinfo.h> 43 44 #include <ipc/services.h> 44 45 … … 72 73 73 74 if (!ktime) { 74 mapping = as_get_mappable_page(PAGE_SIZE); 75 mapping = as_get_mappable_page(PAGE_SIZE, (int) 76 sysinfo_value("clock.fcolor")); 75 77 /* Get the mapping of kernel clock */ 76 res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights, NULL); 78 res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t) 79 mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights, 80 NULL); 77 81 if (res) { 78 82 printf("Failed to initialize timeofday memarea\n"); -
uspace/libc/include/as.h
r9ab9c2ec rf8ddd17 40 40 #include <kernel/arch/mm/as.h> 41 41 #include <kernel/mm/as.h> 42 #include <libarch/config.h> 43 44 #define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) 42 45 43 46 extern void *as_area_create(void *address, size_t size, int flags); … … 45 48 extern int as_area_destroy(void *address); 46 49 extern void *set_maxheapsize(size_t mhs); 47 extern void * as_get_mappable_page(size_t sz );50 extern void * as_get_mappable_page(size_t sz, int color); 48 51 49 52 #endif -
uspace/ns/ns.c
r9ab9c2ec rf8ddd17 84 84 static void *klogaddr = NULL; 85 85 86 static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, void **addr)86 static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, char *colstr, void **addr) 87 87 { 88 88 void *ph_addr; 89 int ph_color; 89 90 90 91 if (!*addr) { 91 ph_addr = (void *) sysinfo_value(name);92 ph_addr = (void *) sysinfo_value(name); 92 93 if (!ph_addr) { 93 94 ipc_answer_fast(callid, ENOENT, 0, 0); 94 95 return; 95 96 } 96 *addr = as_get_mappable_page(PAGE_SIZE); 97 ph_color = (int) sysinfo_value(colstr); 98 *addr = as_get_mappable_page(PAGE_SIZE, ph_color); 97 99 physmem_map(ph_addr, *addr, 1, AS_AREA_READ | AS_AREA_CACHEABLE); 98 100 } … … 117 119 switch (IPC_GET_ARG3(call)) { 118 120 case SERVICE_MEM_REALTIME: 119 get_as_area(callid, &call, "clock.faddr", &clockaddr); 121 get_as_area(callid, &call, "clock.faddr", 122 "clock.fcolor", &clockaddr); 120 123 break; 121 124 case SERVICE_MEM_KLOG: 122 get_as_area(callid, &call, "klog.faddr", &klogaddr); 125 get_as_area(callid, &call, "klog.faddr", 126 "klog.fcolor", &klogaddr); 123 127 break; 124 128 default: -
uspace/rd/rd.c
r9ab9c2ec rf8ddd17 74 74 size_t rd_size = sysinfo_value("rd.size"); 75 75 void * rd_ph_addr = (void *) sysinfo_value("rd.address.physical"); 76 int rd_color = (int) sysinfo_value("rd.address.color"); 76 77 77 78 if (rd_size == 0) 78 79 return false; 79 80 80 void * rd_addr = as_get_mappable_page(rd_size );81 void * rd_addr = as_get_mappable_page(rd_size, rd_color); 81 82 82 83 physmem_map(rd_ph_addr, rd_addr, ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
Note:
See TracChangeset
for help on using the changeset viewer.