Changeset 771cd22 in mainline


Ignore:
Timestamp:
2006-12-16T19:07:02Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
7e7c8747
Parents:
1ecdbb0
Message:

Formatting and indentation changes.

Files:
14 edited

Legend:

Unmodified
Added
Removed
  • boot/genarch/ofw.c

    r1ecdbb0 r771cd22  
    234234}
    235235
    236 
     236/** Save OpenFirmware physical memory map.
     237 *
     238 * @param map Memory map structure where the map will be saved.
     239 *
     240 * @return Zero on failure, non-zero on success.
     241 */
    237242int ofw_memmap(memmap_t *map)
    238243{
     
    240245        unsigned int sc = ofw_get_size_cells(ofw_memory);
    241246
    242         uint32_t buf[((ac+sc)*MEMMAP_MAX_RECORDS)];
     247        uint32_t buf[((ac + sc) * MEMMAP_MAX_RECORDS)];
    243248        int ret = ofw_get_property(ofw_memory, "reg", buf, sizeof(buf));
    244249        if (ret <= 0)           /* ret is the number of written bytes */
     
    248253        map->total = 0;
    249254        map->count = 0;
    250         for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count < MEMMAP_MAX_RECORDS); pos += ac + sc) {
     255        for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count <
     256                MEMMAP_MAX_RECORDS); pos += ac + sc) {
    251257                void * start = (void *) ((uintptr_t) buf[pos + ac - 1]);
    252258                unsigned int size = buf[pos + ac + sc - 1];
  • kernel/arch/sparc64/include/mm/page.h

    r1ecdbb0 r771cd22  
    4141#define PAGE_SIZE       FRAME_SIZE
    4242
    43 #define PAGE_COLOR_BITS 1                       /**< 14 - 13; 2^14 == 16K == alias boundary. */
     43#define PAGE_COLOR_BITS 1       /**< 14 - 13; 2^14 == 16K == alias boundary. */
    4444
    4545#ifdef KERNEL
  • kernel/arch/sparc64/include/mm/tsb.h

    r1ecdbb0 r771cd22  
    4343 * in TLBs - only one TLB entry will do.
    4444 */
    45 #define TSB_SIZE                        2       /* when changing this, change as.c as well */
    46 #define ITSB_ENTRY_COUNT                (512*(1<<TSB_SIZE))
    47 #define DTSB_ENTRY_COUNT                (512*(1<<TSB_SIZE))
     45#define TSB_SIZE                        2       /* when changing this, change
     46                                                 * as.c as well */
     47#define ITSB_ENTRY_COUNT                (512 * (1 << TSB_SIZE))
     48#define DTSB_ENTRY_COUNT                (512 * (1 << TSB_SIZE))
    4849
    4950#define TSB_TAG_TARGET_CONTEXT_SHIFT    48
     
    8182        struct {
    8283                uint64_t base : 51;     /**< TSB base address, bits 63:13. */
    83                 unsigned split : 1;     /**< Split vs. common TSB for 8K and 64K pages.
    84                                           *  HelenOS uses only 8K pages for user mappings,
    85                                           *  so we always set this to 0.
    86                                           */
     84                unsigned split : 1;     /**< Split vs. common TSB for 8K and 64K
     85                                         * pages. HelenOS uses only 8K pages
     86                                         * for user mappings, so we always set
     87                                         * this to 0.
     88                                         */
    8789                unsigned : 9;
    88                 unsigned size : 3;      /**< TSB size. Number of entries is 512*2^size. */
     90                unsigned size : 3;      /**< TSB size. Number of entries is
     91                                         * 512 * 2^size. */
    8992        } __attribute__ ((packed));
    9093};
  • kernel/arch/sparc64/src/mm/as.c

    r1ecdbb0 r771cd22  
    6262{
    6363#ifdef CONFIG_TSB
    64         int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
     64        int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
     65                sizeof(tsb_entry_t)) >> FRAME_WIDTH);
    6566        uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
    6667
     
    6970
    7071        as->arch.itsb = (tsb_entry_t *) tsb;
    71         as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
    72         memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
     72        as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
     73                sizeof(tsb_entry_t));
     74        memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
     75                * sizeof(tsb_entry_t), 0);
    7376#endif
    7477        return 0;
     
    7881{
    7982#ifdef CONFIG_TSB
    80         count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
     83        count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
     84                sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    8185        frame_free(KA2PA((uintptr_t) as->arch.itsb));
    8286        return cnt;
     
    100104}
    101105
    102 /** Perform sparc64-specific tasks when an address space becomes active on the processor.
     106/** Perform sparc64-specific tasks when an address space becomes active on the
     107 * processor.
    103108 *
    104109 * Install ASID and map TSBs.
     
    135140        uintptr_t tsb = (uintptr_t) as->arch.itsb;
    136141               
    137         if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     142        if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    138143                /*
    139144                 * TSBs were allocated from memory not covered
     
    161166}
    162167
    163 /** Perform sparc64-specific tasks when an address space is removed from the processor.
     168/** Perform sparc64-specific tasks when an address space is removed from the
     169 * processor.
    164170 *
    165171 * Demap TSBs.
     
    184190        uintptr_t tsb = (uintptr_t) as->arch.itsb;
    185191               
    186         if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     192        if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    187193                /*
    188194                 * TSBs were allocated from memory not covered
  • kernel/arch/sparc64/src/mm/frame.c

    r1ecdbb0 r771cd22  
    6565                        if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
    6666                                confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
    67                         zone_create(ADDR2PFN(start), SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)), confdata, 0);
    68                         last_frame = max(last_frame, start + ALIGN_UP(size, FRAME_SIZE));
     67                        zone_create(ADDR2PFN(start),
     68                                SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
     69                                confdata, 0);
     70                        last_frame = max(last_frame, start + ALIGN_UP(size,
     71                                FRAME_SIZE));
    6972                }
    7073
     
    7275                 * On sparc64, physical memory can start on a non-zero address.
    7376                 * The generic frame_init() only marks PFN 0 as not free, so we
    74                  * must mark the physically first frame not free explicitly here,
    75                  * no matter what is its address.
     77                 * must mark the physically first frame not free explicitly
     78                 * here, no matter what is its address.
    7679                 */
    7780                frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
  • kernel/arch/sparc64/src/mm/page.c

    r1ecdbb0 r771cd22  
    4646/** Entries locked in DTLB of BSP.
    4747 *
    48  * Application processors need to have the same locked entries
    49  * in their DTLBs as the bootstrap processor.
     48 * Application processors need to have the same locked entries in their DTLBs as
     49 * the bootstrap processor.
    5050 */
    5151static struct {
     
    8585/** Map memory-mapped device into virtual memory.
    8686 *
    87  * So far, only DTLB is used to map devices into memory.
    88  * Chances are that there will be only a limited amount of
    89  * devices that the kernel itself needs to lock in DTLB.
     87 * So far, only DTLB is used to map devices into memory. Chances are that there
     88 * will be only a limited amount of devices that the kernel itself needs to
     89 * lock in DTLB.
    9090 *
    91  * @param physaddr Physical address of the page where the
    92  *                 device is located. Must be at least
    93  *                 page-aligned.
    94  * @param size Size of the device's registers. Must not
    95  *             exceed 4M and must include extra space
    96  *             caused by the alignment.
     91 * @param physaddr Physical address of the page where the device is located.
     92 *      Must be at least page-aligned.
     93 * @param size Size of the device's registers. Must not exceed 4M and must
     94 *      include extra space caused by the alignment.
    9795 *
    98  * @return Virtual address of the page where the device is
    99  *         mapped.
     96 * @return Virtual address of the page where the device is mapped.
    10097 */
    10198uintptr_t hw_map(uintptr_t physaddr, size_t size)
     
    115112                { PAGESIZE_8K, PAGE_SIZE, 4 },          /* 32K */
    116113                { PAGESIZE_64K, 0, 1},                  /* 64K */
    117                 { PAGESIZE_64K, 8*PAGE_SIZE, 2 },       /* 128K */
    118                 { PAGESIZE_64K, 8*PAGE_SIZE, 4 },       /* 256K */
     114                { PAGESIZE_64K, 8 * PAGE_SIZE, 2 },     /* 128K */
     115                { PAGESIZE_64K, 8 * PAGE_SIZE, 4 },     /* 256K */
    119116                { PAGESIZE_512K, 0, 1 },                /* 512K */
    120                 { PAGESIZE_512K, 64*PAGE_SIZE, 2 },     /* 1M */
    121                 { PAGESIZE_512K, 64*PAGE_SIZE, 4 },     /* 2M */
     117                { PAGESIZE_512K, 64 * PAGE_SIZE, 2 },   /* 1M */
     118                { PAGESIZE_512K, 64 * PAGE_SIZE, 4 },   /* 2M */
    122119                { PAGESIZE_4M, 0, 1 },                  /* 4M */
    123                 { PAGESIZE_4M, 512*PAGE_SIZE, 2 }       /* 8M */
     120                { PAGESIZE_4M, 512 * PAGE_SIZE, 2 }     /* 8M */
    124121        };
    125122       
    126123        ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
    127         ASSERT(size <= 8*1024*1024);
     124        ASSERT(size <= 8 * 1024 * 1024);
    128125       
    129126        if (size <= FRAME_SIZE)
     
    145142                 * First, insert the mapping into DTLB.
    146143                 */
    147                 dtlb_insert_mapping(virtaddr + i*sizemap[order].increment,
    148                                     physaddr + i*sizemap[order].increment,
    149                                     sizemap[order].pagesize_code, true, false);
     144                dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
     145                        physaddr + i * sizemap[order].increment,
     146                        sizemap[order].pagesize_code, true, false);
    150147       
    151148#ifdef CONFIG_SMP       
     
    154151                 */
    155152                bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
    156                         virtaddr + i*sizemap[order].increment;
     153                        virtaddr + i * sizemap[order].increment;
    157154                bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
    158                         physaddr + i*sizemap[order].increment;
     155                        physaddr + i * sizemap[order].increment;
    159156                bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
    160157                        sizemap[order].pagesize_code;
  • kernel/arch/sparc64/src/mm/tlb.c

    r1ecdbb0 r771cd22  
    5858static void dtlb_pte_copy(pte_t *t, bool ro);
    5959static void itlb_pte_copy(pte_t *t);
    60 static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
    61 static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
    62 static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
     60static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
     61        char *str);
     62static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
     63         tlb_tag_access_reg_t tag, const char *str);
     64static void do_fast_data_access_protection_fault(istate_t *istate,
     65        tlb_tag_access_reg_t tag, const char *str);
    6366
    6467char *context_encoding[] = {
     
    9194 * @param cacheable True if the mapping is cacheable, false otherwise.
    9295 */
    93 void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
     96void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
     97        locked, bool cacheable)
    9498{
    9599        tlb_tag_access_reg_t tag;
     
    125129 *
    126130 * @param t Page Table Entry to be copied.
    127  * @param ro If true, the entry will be created read-only, regardless of its w field.
     131 * @param ro If true, the entry will be created read-only, regardless of its w
     132 *      field.
    128133 */
    129134void dtlb_pte_copy(pte_t *t, bool ro)
     
    213218        } else {
    214219                /*
    215                  * Forward the page fault to the address space page fault handler.
     220                 * Forward the page fault to the address space page fault
     221                 * handler.
    216222                 */             
    217223                page_table_unlock(AS, true);
    218224                if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
    219                         do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
     225                        do_fast_instruction_access_mmu_miss_fault(istate,
     226                                __FUNCTION__);
    220227                }
    221228        }
     
    224231/** DTLB miss handler.
    225232 *
    226  * Note that some faults (e.g. kernel faults) were already resolved
    227  * by the low-level, assembly language part of the fast_data_access_mmu_miss
    228  * handler.
     233 * Note that some faults (e.g. kernel faults) were already resolved by the
     234 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
    229235 */
    230236void fast_data_access_mmu_miss(int n, istate_t *istate)
     
    240246                if (!tag.vpn) {
    241247                        /* NULL access in kernel */
    242                         do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
    243                 }
    244                 do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
     248                        do_fast_data_access_mmu_miss_fault(istate, tag,
     249                                __FUNCTION__);
     250                }
     251                do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
     252                        "kernel page fault.");
    245253        }
    246254
     
    264272                page_table_unlock(AS, true);
    265273                if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
    266                         do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
     274                        do_fast_data_access_mmu_miss_fault(istate, tag,
     275                                __FUNCTION__);
    267276                }
    268277        }
     
    283292        if (t && PTE_WRITABLE(t)) {
    284293                /*
    285                  * The mapping was found in the software page hash table and is writable.
    286                  * Demap the old mapping and insert an updated mapping into DTLB.
     294                 * The mapping was found in the software page hash table and is
     295                 * writable. Demap the old mapping and insert an updated mapping
     296                 * into DTLB.
    287297                 */
    288298                t->a = true;
     
    296306        } else {
    297307                /*
    298                  * Forward the page fault to the address space page fault handler.
     308                 * Forward the page fault to the address space page fault
     309                 * handler.
    299310                 */             
    300311                page_table_unlock(AS, true);
    301312                if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
    302                         do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
     313                        do_fast_data_access_protection_fault(istate, tag,
     314                                __FUNCTION__);
    303315                }
    304316        }
     
    317329                t.value = itlb_tag_read_read(i);
    318330               
    319                 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
    320                         i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
     331                printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
     332                        "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
     333                        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
     334                        t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
     335                        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    321336        }
    322337
     
    326341                t.value = dtlb_tag_read_read(i);
    327342               
    328                 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
    329                         i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    330         }
    331 
    332 }
    333 
    334 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
     343                printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
     344                        "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
     345                        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
     346                        t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
     347                        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
     348        }
     349
     350}
     351
     352void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
     353        *str)
    335354{
    336355        fault_if_from_uspace(istate, "%s\n", str);
     
    339358}
    340359
    341 void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
     360void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
     361        tag, const char *str)
    342362{
    343363        uintptr_t va;
     
    345365        va = tag.vpn << PAGE_WIDTH;
    346366
    347         fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
     367        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
     368                tag.context);
    348369        dump_istate(istate);
    349370        printf("Faulting page: %p, ASID=%d\n", va, tag.context);
     
    351372}
    352373
    353 void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
     374void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
     375        tag, const char *str)
    354376{
    355377        uintptr_t va;
     
    357379        va = tag.vpn << PAGE_WIDTH;
    358380
    359         fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
     381        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
     382                tag.context);
    360383        printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    361384        dump_istate(istate);
     
    371394        sfar = dtlb_sfar_read();
    372395       
    373         printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, fv=%d\n",
    374                 sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
     396        printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
     397                "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
     398                sfsr.ow, sfsr.fv);
    375399        printf("DTLB SFAR: address=%p\n", sfar);
    376400       
     
    407431}
    408432
    409 /** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
     433/** Invalidate all ITLB and DTLB entries that belong to specified ASID
     434 * (Context).
    410435 *
    411436 * @param asid Address Space ID.
     
    430455}
    431456
    432 /** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
     457/** Invalidate all ITLB and DTLB entries for specified page range in specified
     458 * address space.
    433459 *
    434460 * @param asid Address Space ID.
     
    449475       
    450476        for (i = 0; i < cnt; i++) {
    451                 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
    452                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
     477                itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
     478                        PAGE_SIZE);
     479                dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
     480                        PAGE_SIZE);
    453481        }
    454482       
  • kernel/arch/sparc64/src/mm/tsb.c

    r1ecdbb0 r771cd22  
    4242#include <debug.h>
    4343
    44 #define TSB_INDEX_MASK          ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
     44#define TSB_INDEX_MASK          ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
    4545
    4646/** Invalidate portion of TSB.
    4747 *
    48  * We assume that the address space is already locked.
    49  * Note that respective portions of both TSBs
    50  * are invalidated at a time.
     48 * We assume that the address space is already locked. Note that respective
     49 * portions of both TSBs are invalidated at a time.
    5150 *
    5251 * @param as Address space.
    5352 * @param page First page to invalidate in TSB.
    54  * @param pages Number of pages to invalidate.
    55  *              Value of (count_t) -1 means the whole TSB.
     53 * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
     54 *      whole TSB.
    5655 */
    5756void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
     
    6665       
    6766        for (i = 0; i < cnt; i++) {
    68                 as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = true;
    69                 as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = true;
     67                as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
     68                        true;
     69                as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
     70                        true;
    7071        }
    7172}
  • kernel/arch/sparc64/src/smp/ipi.c

    r1ecdbb0 r771cd22  
    6262
    6363        /*
    64          * This functin might enable interrupts for a while.
     64         * This function might enable interrupts for a while.
    6565         * In order to prevent migration to another processor,
    6666         * we explicitly disable preemption.
  • kernel/arch/sparc64/src/sparc64.c

    r1ecdbb0 r771cd22  
    5151bootinfo_t bootinfo;
    5252
     53/** Perform sparc64 specific initialization before main_bsp() is called. */
    5354void arch_pre_main(void)
    5455{
     
    7071}
    7172
     73/** Perform sparc64 specific initialization before mm is initialized. */
    7274void arch_pre_mm_init(void)
    7375{
     
    7678}
    7779
     73/** Perform sparc64 specific initialization after mm is initialized. */
    7881void arch_post_mm_init(void)
    7982{
    8083        if (config.cpu_active == 1) {
    81                 irq_init(1<<11, 128);
     84                /*
     85                 * We have 2^11 different interrupt vectors.
     86                 * But we only create 128 buckets.
     87                 */
     88                irq_init(1 << 11, 128);
     89               
    8290                standalone_sparc64_console_init();
    8391        }
  • kernel/generic/include/macros.h

    r1ecdbb0 r771cd22  
    3939#include <typedefs.h>
    4040
    41 #define is_digit(d)             (((d) >= '0') && ((d) <= '9'))
    42 #define is_lower(c)             (((c) >= 'a') && ((c) <= 'z'))
    43 #define is_upper(c)             (((c) >= 'A') && ((c) <= 'Z'))
    44 #define is_alpha(c)             (is_lower(c) || is_upper(c))
    45 #define is_alphanum(c)          (is_alpha(c) || is_digit(c))
    46 #define is_white(c)             (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
     41#define is_digit(d)     (((d) >= '0') && ((d) <= '9'))
     42#define is_lower(c)     (((c) >= 'a') && ((c) <= 'z'))
     43#define is_upper(c)     (((c) >= 'A') && ((c) <= 'Z'))
     44#define is_alpha(c)     (is_lower(c) || is_upper(c))
     45#define is_alphanum(c)  (is_alpha(c) || is_digit(c))
     46#define is_white(c)     (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || \
     47                                ((c) == '\r'))
    4748
    4849#define min(a,b)        ((a) < (b) ? (a) : (b))
  • kernel/generic/src/main/main.c

    r1ecdbb0 r771cd22  
    127127#endif
    128128
    129 #define CONFIG_STACK_SIZE       ((1<<STACK_FRAMES)*STACK_SIZE)
     129#define CONFIG_STACK_SIZE       ((1 << STACK_FRAMES) * STACK_SIZE)
    130130
    131131/** Main kernel routine for bootstrap CPU.
  • kernel/generic/src/mm/frame.c

    r1ecdbb0 r771cd22  
    11391139        printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
    11401140        printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
    1141         printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
     1141        printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
    11421142        buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
    11431143       
  • kernel/generic/src/proc/task.c

    r1ecdbb0 r771cd22  
    269269/** Get accounting data of given task.
    270270 *
    271  * Note that task_lock on @t must be already held and
     271 * Note that task lock of 't' must be already held and
    272272 * interrupts must be already disabled.
    273273 *
Note: See TracChangeset for help on using the changeset viewer.