Changeset 7e7c8747 in mainline


Ignore:
Timestamp:
2006-12-17T12:11:00Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
95155b0c
Parents:
771cd22
Message:

More formatting and indentation changes.

Location:
kernel/arch/sparc64/src
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/sparc64/src/cpu/cpu.c

    r771cd22 r7e7c8747  
    4545#include <macros.h>
    4646
    47 /** Perform sparc64 specific initialization of the processor structure for the current processor. */
     47/** Perform sparc64 specific initialization of the processor structure for the
     48 * current processor.
     49 */
    4850void cpu_arch_init(void)
    4951{
     
    6769                        mid = *((uint32_t *) prop->value);
    6870                        if (mid == CPU->arch.mid) {
    69                                 prop = ofw_tree_getprop(node, "clock-frequency");
     71                                prop = ofw_tree_getprop(node,
     72                                        "clock-frequency");
    7073                                if (prop && prop->value)
    71                                         clock_frequency = *((uint32_t *) prop->value);
     74                                        clock_frequency = *((uint32_t *)
     75                                                prop->value);
    7276                        }
    7377                }
     
    8185         * Lock CPU stack in DTLB.
    8286         */
    83         uintptr_t base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
     87        uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
    8488                 
    85         if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
     89        if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1 <<
     90                KERNEL_PAGE_WIDTH))) {
    8691                /*
    8792                 * Kernel stack of this processor is not locked in DTLB.
     
    8994                 * Second, create a locked mapping for it.
    9095                 */
    91                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) CPU->stack);
    92                 dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack), PAGESIZE_8K, true, true);
     96                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
     97                        CPU->stack);
     98                dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack),
     99                        PAGESIZE_8K, true, true);
    93100        }
    94101}
     
    104111 * This function is called by the bootstrap processor.
    105112 *
    106  * @param m Processor structure of the CPU for which version information is to be printed.
     113 * @param m Processor structure of the CPU for which version information is to
     114 *      be printed.
    107115 */
    108116void cpu_print_report(cpu_t *m)
     
    152160        }
    153161
    154         printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n",
    155                 m->id, manuf, impl, m->arch.ver.mask, m->arch.clock_frequency/1000000);
     162        printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf,
     163                impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
    156164}
    157165
  • kernel/arch/sparc64/src/proc/scheduler.c

    r771cd22 r7e7c8747  
    5252/** Perform sparc64 specific steps before scheduling a thread.
    5353 *
    54  * Ensure that thread's kernel stack, as well as userspace window
    55  * buffer for userspace threads, are locked in DTLB.
    56  * For userspace threads, initialize reserved global registers
    57  * in the alternate and interrupt sets.
     54 * Ensure that thread's kernel stack, as well as userspace window buffer for
     55 * userspace threads, are locked in DTLB. For userspace threads, initialize
     56 * reserved global registers in the alternate and interrupt sets.
    5857 */
    5958void before_thread_runs_arch(void)
     
    6362        base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
    6463
    65         if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
     64        if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
     65                KERNEL_PAGE_WIDTH))) {
    6666                /*
    6767                 * Kernel stack of this thread is not locked in DTLB.
     
    6969                 * If not, create a locked mapping for it.
    7070                 */
    71                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
    72                 dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
     71                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
     72                        THREAD->kstack);
     73                dtlb_insert_mapping((uintptr_t) THREAD->kstack,
     74                        KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
    7375        }
    7476       
     
    7981                 */
    8082                ASSERT(THREAD->arch.uspace_window_buffer);
    81                 uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
    82                 if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
     83                uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
     84                        THREAD->arch.uspace_window_buffer, PAGE_SIZE);
     85                if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH))
     86                        {
    8387                        /*
    84                          * The buffer is not covered by the 4M locked kernel DTLB entry.
     88                         * The buffer is not covered by the 4M locked kernel
     89                         * DTLB entry.
    8590                         */
    8691                        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
    87                         dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
     92                        dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K,
     93                                true, true);
    8894                }
    8995               
    9096                /*
    91                  * Write kernel stack address to %g6 and a pointer to the last item
    92                  * in the userspace window buffer to %g7 in the alternate and interrupt sets.
     97                 * Write kernel stack address to %g6 and a pointer to the last
     98                 * item in the userspace window buffer to %g7 in the alternate
     99                 * and interrupt sets.
    93100                 */
    94101                uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
    95                         - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
     102                        - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
     103                        STACK_ALIGNMENT));
    96104                write_to_ig_g6(sp);
    97105                write_to_ag_g6(sp);
     
    109117        uintptr_t base;
    110118
    111         base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
     119        base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
    112120
    113         if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
     121        if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
     122                KERNEL_PAGE_WIDTH))) {
    114123                /*
    115124                 * Kernel stack of this thread is locked in DTLB.
    116125                 * Destroy the mapping.
    117126                 */
    118                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
     127                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
     128                        THREAD->kstack);
    119129        }
    120130       
     
    126136                ASSERT(THREAD->arch.uspace_window_buffer);
    127137               
    128                 uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
    129                 if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
     138                uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
     139                        THREAD->arch.uspace_window_buffer, PAGE_SIZE);
     140                if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    130141                        /*
    131142                         * The buffer is not covered by the 4M locked kernel DTLB entry
  • kernel/arch/sparc64/src/proc/thread.c

    r771cd22 r7e7c8747  
    5555                 * belonging to a killed thread.
    5656                 */
    57                 frame_free(KA2PA(ALIGN_DOWN((uintptr_t) t->arch.uspace_window_buffer, PAGE_SIZE)));
     57                frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
     58                        t->arch.uspace_window_buffer, PAGE_SIZE)));
    5859        }
    5960}
     
    6162void thread_create_arch(thread_t *t)
    6263{
    63         if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer)) {
     64        if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer))
     65                {
    6466                /*
    6567                 * The thread needs userspace window buffer and the object
     
    7476                 * belonging to a killed thread.
    7577                 */
    76                  t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf, PAGE_SIZE);
     78                 t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
     79                        PAGE_SIZE);
    7780        }
    7881}
  • kernel/arch/sparc64/src/start.S

    r771cd22 r7e7c8747  
    7878
    7979        wrpr %g0, NWINDOWS - 2, %cansave        ! set maximum saveable windows
    80         wrpr %g0, 0, %canrestore                ! get rid of windows we will never need again
    81         wrpr %g0, 0, %otherwin                  ! make sure the window state is consistent
    82         wrpr %g0, NWINDOWS - 1, %cleanwin       ! prevent needless clean_window traps for kernel
    83 
    84         wrpr %g0, 0, %tl                        ! TL = 0, primary context register is used
    85 
    86         wrpr %g0, PSTATE_PRIV_BIT, %pstate      ! Disable interrupts and disable 32-bit address masking.
     80        wrpr %g0, 0, %canrestore                ! get rid of windows we will
     81                                                ! never need again
     82        wrpr %g0, 0, %otherwin                  ! make sure the window state is
     83                                                ! consistent
     84        wrpr %g0, NWINDOWS - 1, %cleanwin       ! prevent needless clean_window
     85                                                ! traps for kernel
     86
     87        wrpr %g0, 0, %tl                        ! TL = 0, primary context
     88                                                ! register is used
     89
     90        wrpr %g0, PSTATE_PRIV_BIT, %pstate      ! disable interrupts and disable
     91                                                ! 32-bit address masking
    8792
    9893        wrpr %g0, 0, %pil                       ! initialize %pil
     
    95100
    96101        /*
    97          * Take over the DMMU by installing global locked
    98          * TTE entry identically mapping the first 4M
    99          * of memory.
     102         * Take over the DMMU by installing global locked TTE entry identically
     103         * mapping the first 4M of memory.
    100104         *
    101          * In case of DMMU, no FLUSH instructions need to be
    102          * issued. Because of that, the old DTLB contents can
    103          * be demapped pretty straightforwardly and without
    104          * causing any traps.
     105         * In case of DMMU, no FLUSH instructions need to be issued. Because of
     106         * that, the old DTLB contents can be demapped pretty straightforwardly
     107         * and without causing any traps.
    105108         */
    106109
     
    108111
    109112#define SET_TLB_DEMAP_CMD(r1, context_id) \
    110         set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
     113        set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
     114                TLB_DEMAP_CONTEXT_SHIFT), %r1
    111115       
    112116        ! demap context 0
     
    116120
    117121#define SET_TLB_TAG(r1, context) \
    118         set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
     122        set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
    119123
    120124        ! write DTLB tag
     
    145149
    146150        /*
    147          * Because we cannot use global mappings (because we want to
    148          * have separate 64-bit address spaces for both the kernel
    149          * and the userspace), we prepare the identity mapping also in
    150          * context 1. This step is required by the
    151          * code installing the ITLB mapping.
     151         * Because we cannot use global mappings (because we want to have
     152         * separate 64-bit address spaces for both the kernel and the
     153         * userspace), we prepare the identity mapping also in context 1. This
     154         * step is required by the code installing the ITLB mapping.
    152155         */
    153156        ! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
     
    162165       
    163166        /*
    164          * Now is time to take over the IMMU.
    165          * Unfortunately, it cannot be done as easily as the DMMU,
    166          * because the IMMU is mapping the code it executes.
     167         * Now is time to take over the IMMU. Unfortunately, it cannot be done
     168         * as easily as the DMMU, because the IMMU is mapping the code it
     169         * executes.
    167170         *
    168          * [ Note that brave experiments with disabling the IMMU
    169          * and using the DMMU approach failed after a dozen
    170          * of desperate days with only little success. ]
     171         * [ Note that brave experiments with disabling the IMMU and using the
     172         * DMMU approach failed after a dozen of desperate days with only little
     173         * success. ]
    171174         *
    172          * The approach used here is inspired from OpenBSD.
    173          * First, the kernel creates IMMU mapping for itself
    174          * in context 1 (MEM_CONTEXT_TEMP) and switches to
    175          * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
    176          * afterwards and replaced with the kernel permanent
    177          * mapping. Finally, the kernel switches back to
    178          * context 0 and demaps context 1.
     175         * The approach used here is inspired from OpenBSD. First, the kernel
     176         * creates IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
     177         * switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
     178         * afterwards and replaced with the kernel permanent mapping. Finally,
     179         * the kernel switches back to context 0 and demaps context 1.
    179180         *
    180          * Moreover, the IMMU requires use of the FLUSH instructions.
    181          * But that is OK because we always use operands with
    182          * addresses already mapped by the taken over DTLB.
     181         * Moreover, the IMMU requires use of the FLUSH instructions. But that
     182         * is OK because we always use operands with addresses already mapped by
     183         * the taken over DTLB.
    183184         */
    184185       
     
    292293#ifdef CONFIG_SMP
    293294        /*
    294          * Active loop for APs until the BSP picks them up.
    295          * A processor cannot leave the loop until the
    296          * global variable 'waking_up_mid' equals its
     295         * Active loop for APs until the BSP picks them up. A processor cannot
     296         * leave the loop until the global variable 'waking_up_mid' equals its
    297297         * MID.
    298298         */
     
    327327
    328328/*
    329  * Create small stack to be used by the bootstrap processor.
    330  * It is going to be used only for a very limited period of
    331  * time, but we switch to it anyway, just to be sure we are
    332  * properly initialized.
     329 * Create small stack to be used by the bootstrap processor. It is going to be
     330 * used only for a very limited period of time, but we switch to it anyway,
     331 * just to be sure we are properly initialized.
    333332 *
    334  * What is important is that this piece of memory is covered
    335  * by the 4M DTLB locked entry and therefore there will be
    336  * no surprises like deadly combinations of spill trap and
    337          * TLB miss on the stack address.
     333 * What is important is that this piece of memory is covered by the 4M DTLB
     334 * locked entry and therefore there will be no surprises like deadly
     335 * combinations of spill trap and TLB miss on the stack address.
    338336 */
    339337
     
    355353
    356354/*
    357  * This variable is used by the fast_data_MMU_miss trap handler.
    358  * In runtime, it is further modified to reflect the starting address of
    359  * physical memory.
     355 * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it
     356 * is further modified to reflect the starting address of physical memory.
    360357 */
    361358.global kernel_8k_tlb_data_template
    362359kernel_8k_tlb_data_template:
    363360#ifdef CONFIG_VIRT_IDX_DCACHE
    364         .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_CV | TTE_P | TTE_W)
     361        .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
     362                 TTE_CV | TTE_P | TTE_W)
    365363#else /* CONFIG_VIRT_IDX_DCACHE */
    366         .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_P | TTE_W)
     364        .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
     365                TTE_P | TTE_W)
    367366#endif /* CONFIG_VIRT_IDX_DCACHE */
     367
Note: See TracChangeset for help on using the changeset viewer.