Changeset a0d74fd in mainline


Timestamp: 2006-03-01T11:07:04Z (19 years ago)
Author:    Jakub Jermar <jakub@…>
Branches:  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:  9ad03fe
Parents:   2c49fbbe
Message:

ia64 work.
Provide PA2KA(identity) mapping for kernel data references via the Alternate Data TLB Fault handler.
Add before_thread_runs_arch(), which maps the kernel stack if necessary.
Add an easy-to-use dtlb_mapping_insert() for convenient insertion of kernel data mappings.
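
In outline, the new fault path resolves data faults on kernel addresses by installing an identity mapping on the fly. A condensed sketch (names taken from the arch/ia64/src/mm/tlb.c diff below; error handling trimmed):

    void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
    {
            __address va = pstate->cr_ifa;          /* faulting address */
            region_register rr;

            rr.word = rr_read(VA2VRN(va));
            if (RID2ASID(rr.map.rid) == ASID_KERNEL && VA2VRN(va) == VRN_KERNEL) {
                    /* Kernel reference: install the identity mapping into the DTC. */
                    dtlb_mapping_insert(va, KA2PA(va), false, 0);
                    return;
            }
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, va, rr.map.rid);
    }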

Location: arch
Files: 1 added, 10 edited

  • arch/ia64/Makefile.inc

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -75,3 +75,4 @@
             arch/$(ARCH)/src/mm/page.c \
             arch/$(ARCH)/src/mm/tlb.c \
    +        arch/$(ARCH)/src/proc/scheduler.c \
             arch/$(ARCH)/src/drivers/it.c
  • arch/ia64/include/mm/asid.h

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -35,4 +35,7 @@
     
     typedef __u16 asid_t;
    +typedef __u32 rid_t;
    +
    +#endif  /* __ASM__ */
     
     /**
    @@ -41,18 +44,12 @@
      * but those extra bits are not used by the kernel.
      */
    -#endif
    -
     #define RIDS_PER_ASID           7
    +
     #define RID_MAX                 262143          /* 2^18 - 1 */
    +#define RID_KERNEL              0
    +#define RID_INVALID             1
     
    -#define ASID2RID(asid, vrn)     (((asid)*RIDS_PER_ASID)+(vrn))
    +#define ASID2RID(asid, vrn)     (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
     #define RID2ASID(rid)           ((rid)/RIDS_PER_ASID)
    -
    -#ifndef __ASM__
    -
    -
    -typedef __u32 rid_t;
    -
    -#endif
     
     #define ASID_MAX_ARCH           (RID_MAX/RIDS_PER_ASID)
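
    A quick sanity check of the revised macros above: with RIDS_PER_ASID = 7, an
    ASID greater than 7 gets a block of seven RIDs (one per VRN), while small
    ASIDs map to themselves, which is what lets the kernel keep RID_KERNEL = 0.
    A stand-alone illustration (macro bodies copied from the diff; not new API):

        #include <assert.h>

        #define RIDS_PER_ASID        7
        #define ASID2RID(asid, vrn)  (((asid) > RIDS_PER_ASID) ? (((asid) * RIDS_PER_ASID) + (vrn)) : (asid))
        #define RID2ASID(rid)        ((rid) / RIDS_PER_ASID)

        int main(void)
        {
                assert(ASID2RID(8, 3) == 59);   /* 8*7 + 3: ordinary address space, VRN 3 */
                assert(RID2ASID(59) == 8);      /* RID2ASID inverts the per-VRN encoding */
                assert(ASID2RID(0, 7) == 0);    /* small ASIDs pass through: kernel keeps RID 0 */
                return 0;
        }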
  • arch/ia64/include/mm/page.h

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -31,6 +31,49 @@
     #define __ia64_PAGE_H__
     
    +#define PAGE_SIZE       FRAME_SIZE
    +#define PAGE_WIDTH      FRAME_WIDTH
    +
    +/** Bit width of the TLB-locked portion of kernel address space. */
    +#define KERNEL_PAGE_WIDTH       28      /* 256M */
    +
    +#define SET_PTL0_ADDRESS_ARCH(x)        /**< To be removed as situation permits. */
    +
    +#define PPN_SHIFT                       12
    +
    +#define VRN_SHIFT                       61
    +#define VRN_MASK                        (7LL << VRN_SHIFT)
    +#define VA2VRN(va)                      ((va)>>VRN_SHIFT)
    +
    +#ifdef __ASM__
    +#define VRN_KERNEL                      7
    +#else
    +#define VRN_KERNEL                      7LL
    +#endif
    +
    +#define REGION_REGISTERS                8
    +
    +#define KA2PA(x)        ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
    +#define PA2KA(x)        ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
    +
    +#define VHPT_WIDTH                      20              /* 1M */
    +#define VHPT_SIZE                       (1 << VHPT_WIDTH)
    +#define VHPT_BASE                       0               /* Must be aligned to VHPT_SIZE */
    +
    +#define PTA_BASE_SHIFT                  15
    +
    +/** Memory Attributes. */
    +#define MA_WRITEBACK    0x0
    +#define MA_UNCACHEABLE  0x4
    +
    +/** Privilege Levels. Only the most and the least privileged ones are ever used. */
    +#define PL_KERNEL       0x0
    +#define PL_USER         0x3
    +
    +/* Access Rigths. Only certain combinations are used by the kernel. */
    +#define AR_READ         0x0
    +#define AR_EXECUTE      0x1
    +#define AR_WRITE        0x2
    +
     #ifndef __ASM__
    -
     
     #include <arch/mm/frame.h>
    @@ -41,57 +84,4 @@
     #include <typedefs.h>
     #include <debug.h>
    -
    -#endif
    -
    -#define PAGE_SIZE       FRAME_SIZE
    -#define PAGE_WIDTH      FRAME_WIDTH
    -#define KERNEL_PAGE_WIDTH       28
    -
    -
    -
    -#define SET_PTL0_ADDRESS_ARCH(x)        /**< To be removed as situation permits. */
    -
    -#define PPN_SHIFT                       12
    -
    -#define VRN_SHIFT                       61
    -#define VRN_MASK                        (7LL << VRN_SHIFT)
    -
    -#ifdef __ASM__
    -#define VRN_KERNEL                      7
    -#else
    -#define VRN_KERNEL                      7LL
    -#endif
    -
    -#define REGION_REGISTERS                8
    -
    -#define KA2PA(x)        ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
    -#define PA2KA(x)        ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
    -
    -
    -#define VHPT_WIDTH                      20              /* 1M */
    -#define VHPT_SIZE                       (1 << VHPT_WIDTH)
    -#define VHPT_BASE                       0               /* Must be aligned to VHPT_SIZE */
    -
    -#define PTA_BASE_SHIFT                  15
    -
    -/** Memory Attributes. */
    -#define MA_WRITEBACK    0x0
    -#define MA_UNCACHEABLE  0x4
    -
    -/** Privilege Levels. Only the most and the least privileged ones are ever used. */
    -#define PL_KERNEL       0x0
    -#define PL_USER         0x3
    -
    -/* Access Rigths. Only certain combinations are used by the kernel. */
    -#define AR_READ         0x0
    -#define AR_EXECUTE      0x1
    -#define AR_WRITE        0x2
    -
    -
    -#define VA_REGION_INDEX 61
    -
    -#define VA_REGION(va) (va>>VA_REGION_INDEX)
    -
    -#ifndef __ASM__
     
     struct vhpt_tag_info {
    @@ -156,6 +146,4 @@
     } vhpt_entry_t;
     
    -typedef vhpt_entry_t tlb_entry_t;
    -
     struct region_register_map {
             unsigned ve : 1;
    @@ -231,10 +219,7 @@
             __u64 ret;
             ASSERT(i < REGION_REGISTERS);
    -        i=i<<VRN_SHIFT;
    -        __asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i));
    -        
    -        return ret;
    -}
    -
    +        __asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
    +        return ret;
    +}
     
     /** Write Region Register.
    @@ -246,9 +231,9 @@
     {
             ASSERT(i < REGION_REGISTERS);
    -        i=i<<VRN_SHIFT;
             __asm__ volatile (
    -        "mov rr[%0] = %1;;\n"
    -        :
    -        : "r" (i), "r" (v));
    +                "mov rr[%0] = %1\n"
    +                :
    +                : "r" (i << VRN_SHIFT), "r" (v)
    +        );
     }
     
    @@ -281,9 +266,5 @@
     extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
     
    -
    -
     #endif
     
     #endif
    -
    -
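
    Since VRN_KERNEL = 7 and VRN_SHIFT = 61 in the header above, KA2PA()/PA2KA()
    simply clear or set the top three (VRN) bits of an address. A minimal
    stand-alone illustration (with __address approximated by unsigned long long;
    the macro bodies are copied from the diff):

        typedef unsigned long long __address;

        #define VRN_SHIFT       61
        #define VRN_KERNEL      7LL
        #define VA2VRN(va)      ((va) >> VRN_SHIFT)
        #define KA2PA(x)        ((__address) ((x) - (VRN_KERNEL << VRN_SHIFT)))
        #define PA2KA(x)        ((__address) ((x) + (VRN_KERNEL << VRN_SHIFT)))

        int main(void)
        {
                __address pa = 0x4000;          /* some physical frame */
                __address ka = PA2KA(pa);       /* 0xe000000000004000, i.e. region 7 */

                return !(VA2VRN(ka) == VRN_KERNEL && KA2PA(ka) == pa);
        }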
  • arch/ia64/include/mm/tlb.h

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -39,11 +39,44 @@
     #include <typedefs.h>
     
    -extern void tc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry, bool dtc);
    -extern void dtc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
    -extern void itc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
    +/** Data and instruction Translation Register indices. */
    +#define DTR_KERNEL      0
    +#define ITR_KERNEL      0
    +#define DTR_KSTACK      1
    +
    +/** Portion of TLB insertion format data structure. */
    +union tlb_entry {
    +        __u64 word[2];
    +        struct {
    +                /* Word 0 */
    +                unsigned p : 1;                 /**< Present. */
    +                unsigned : 1;
    +                unsigned ma : 3;                /**< Memory attribute. */
    +                unsigned a : 1;                 /**< Accessed. */
    +                unsigned d : 1;                 /**< Dirty. */
    +                unsigned pl : 2;                /**< Privilege level. */
    +                unsigned ar : 3;                /**< Access rights. */
    +                unsigned long long ppn : 38;    /**< Physical Page Number, a.k.a. PFN. */
    +                unsigned : 2;
    +                unsigned ed : 1;
    +                unsigned ig1 : 11;
    +
    +                /* Word 1 */
    +                unsigned : 2;
    +                unsigned ps : 6;                /**< Page size will be 2^ps. */
    +                unsigned key : 24;              /**< Protection key, unused. */
    +                unsigned : 32;
    +        } __attribute__ ((packed));
    +} __attribute__ ((packed));
    +typedef union tlb_entry tlb_entry_t;
    +
    +extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc);
    +extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
    +extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
     
     extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
     extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
     extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
    +
    +extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
     
     extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
  • arch/ia64/src/dummy.s

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -32,6 +32,4 @@
     .global asm_delay_loop
     .global userspace
    -.global before_thread_runs_arch
    -.global after_thread_ran_arch
     .global cpu_sleep
     .global dummy
    @@ -40,6 +38,4 @@
     .global fpu_init
     
    -before_thread_runs_arch:
    -after_thread_ran_arch:
     userspace:
     calibrate_delay_loop:
  • arch/ia64/src/mm/page.c

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -56,5 +56,4 @@
     void set_environment(void)
     {
    -
             region_register rr;
             pta_register pta;
    @@ -63,15 +62,14 @@
             /*
              * First set up kernel region register.
    -         * This action is redundand (see start.S) but I would to keep it to make sure that
    -         *no unexpected changes will be made.
    +         * This is redundant (see start.S) but we keep it here just for sure.
              */
             rr.word = rr_read(VRN_KERNEL);
             rr.map.ve = 0;                  /* disable VHPT walker */
             rr.map.ps = PAGE_WIDTH;
    -        rr.map.rid = ASID2RID(ASID_KERNEL,VRN_KERNEL); 
    +        rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
             rr_write(VRN_KERNEL, rr.word);
             srlz_i();
             srlz_d();
    -        
    +
             /*
              * And invalidate the rest of region register.
    @@ -84,5 +82,5 @@
                     rr.word == rr_read(i);
                     rr.map.ve = 0;          /* disable VHPT walker */
    -                rr.map.rid = ASID2RID(ASID_INVALID,i);
    +                rr.map.rid = RID_INVALID;
                     rr_write(i, rr.word);
                     srlz_i();
    @@ -101,8 +99,4 @@
             srlz_i();
             srlz_d();
    -        
    -
    -        return ;       
    -
     }
     
  • arch/ia64/src/mm/tlb.c

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -32,9 +32,12 @@
     
     #include <mm/tlb.h>
    +#include <mm/asid.h>
     #include <arch/mm/tlb.h>
    +#include <arch/mm/page.h>
     #include <arch/barrier.h>
     #include <arch/interrupt.h>
     #include <typedefs.h>
     #include <panic.h>
    +#include <print.h>
     
     /** Invalidate all TLB entries. */
    @@ -85,9 +88,9 @@
             bool restore_rr = false;
     
    -        if (!(entry.not_present.p))
    +        if (!(entry.p))
                     return;
     
    -        rr.word = rr_read(VA_REGION(va));
    -        if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
    +        rr.word = rr_read(VA2VRN(va));
    +        if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
                     /*
                      * The selected region register does not contain required RID.
    @@ -97,6 +100,6 @@
     
                     rr0 = rr;
    -                rr0.map.rid = ASID2RID(asid, VA_REGION(va));
    -                rr_write(VA_REGION(va), rr0.word);
    +                rr0.map.rid = ASID2RID(asid, VA2VRN(va));
    +                rr_write(VA2VRN(va), rr0.word);
                     srlz_d();
                     srlz_i();
    @@ -121,5 +124,5 @@
             
             if (restore_rr) {
    -                rr_write(VA_REGION(va),rr.word);
    +                rr_write(VA2VRN(va), rr.word);
                     srlz_d();
                     srlz_i();
    @@ -164,9 +167,9 @@
             bool restore_rr = false;
     
    -        if (!(entry.not_present.p))
    +        if (!(entry.p))
                     return;
     
    -        rr.word = rr_read(VA_REGION(va));
    -        if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
    +        rr.word = rr_read(VA2VRN(va));
    +        if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
                     /*
                      * The selected region register does not contain required RID.
    @@ -176,6 +179,6 @@
     
                     rr0 = rr;
    -                rr0.map.rid = ASID2RID(asid, VA_REGION(va));
    -                rr_write(VA_REGION(va), rr0.word);
    +                rr0.map.rid = ASID2RID(asid, VA2VRN(va));
    +                rr_write(VA2VRN(va), rr0.word);
                     srlz_d();
                     srlz_i();
    @@ -200,8 +203,38 @@
             
             if (restore_rr) {
    -                rr_write(VA_REGION(va),rr.word);
    -                srlz_d();
    -                srlz_i();
    -        }
    +                rr_write(VA2VRN(va), rr.word);
    +                srlz_d();
    +                srlz_i();
    +        }
    +}
    +
    +/** Insert data into DTLB.
    + *
    + * @param va Virtual page address.
    + * @param asid Address space identifier.
    + * @param entry The rest of TLB entry as required by TLB insertion format.
    + * @param dtr If true, insert into data translation register, use data translation cache otherwise.
    + * @param tr Translation register if dtr is true, ignored otherwise.
    + */
    +void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
    +{
    +        tlb_entry_t entry;
    +        
    +        entry.word[0] = 0;
    +        entry.word[1] = 0;
    +        
    +        entry.p = true;                 /* present */
    +        entry.ma = MA_WRITEBACK;
    +        entry.a = true;                 /* already accessed */
    +        entry.d = true;                 /* already dirty */
    +        entry.pl = PL_KERNEL;
    +        entry.ar = AR_READ | AR_WRITE;
    +        entry.ppn = frame >> PPN_SHIFT;
    +        entry.ps = PAGE_WIDTH;
    +        
    +        if (dtr)
    +                dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    +        else
    +                dtc_mapping_insert(page, ASID_KERNEL, entry);
     }
     
    @@ -211,7 +244,29 @@
     }
     
    +/** Data TLB fault with VHPT turned off.
    + *
    + * @param vector Interruption vector.
    + * @param pstate Structure with saved interruption state.
    + */
     void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
     {
    -        panic("%s: %P\n", __FUNCTION__, pstate->cr_ifa);
    +        region_register rr;
    +        rid_t rid;
    +        __address va;
    +        
    +        va = pstate->cr_ifa;    /* faulting address */
    +        rr.word = rr_read(VA2VRN(va));
    +        rid = rr.map.rid;
    +        if (RID2ASID(rid) == ASID_KERNEL) {
    +                if (VA2VRN(va) == VRN_KERNEL) {
    +                        /*
    +                         * Provide KA2PA(identity) mapping for faulting piece of
    +                         * kernel address space.
    +                         */
    +                        dtlb_mapping_insert(va, KA2PA(va), false, 0);
    +                        return;
    +                }
    +        }
    +        panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
     }
     
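
    The changeset also adds arch/ia64/src/proc/scheduler.c (the "1 added" file,
    evidently the new build entry in the Makefile.inc diff above), which is not
    expanded in this view. Based on the commit message and the sparc64
    counterpart below, before_thread_runs_arch() presumably pins the kernel
    stack with dtlb_mapping_insert() when the stack lies outside the TLB-locked
    256M region. A hypothetical sketch only; the exact test and the body are
    assumptions, not the committed code:

        void before_thread_runs_arch(void)
        {
                /* Hypothetical reconstruction -- the added ia64 scheduler.c
                 * is not shown in this changeset view. */
                __address base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
                __address stack = (__address) THREAD->kstack;

                if (stack < base || stack > base + (1 << KERNEL_PAGE_WIDTH)) {
                        /* Stack is outside the locked region; pin it in DTR_KSTACK. */
                        dtlb_mapping_insert(stack, KA2PA(stack), true, DTR_KSTACK);
                }
        }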
  • arch/ia64/src/start.S

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -53,5 +53,5 @@
             movl r10=(RR_MASK)
             and r9=r10,r9
    -        movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
    +        movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
             or  r9=r10,r9
             mov rr[r8]=r9
  • arch/sparc64/include/mm/tlb.h

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -46,4 +46,7 @@
     #define PAGESIZE_512K   2
     #define PAGESIZE_4M     3
    +
    +/** Bit width of the TLB-locked portion of kernel address space. */
    +#define KERNEL_PAGE_WIDTH       22      /* 4M */
     
     union tlb_context_reg {
  • arch/sparc64/src/proc/scheduler.c

    --- r2c49fbbe
    +++ ra0d74fd
    @@ -31,4 +31,5 @@
     #include <arch.h>
     #include <arch/mm/tlb.h>
    +#include <arch/mm/page.h>
     #include <config.h>
     #include <align.h>
    @@ -39,7 +40,7 @@
             __address base;
             
    -        base = ALIGN_DOWN(config.base, 4*1024*1024);
    +        base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
     
    -        if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
    +        if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
                     /*
                      * Kernel stack of this thread is not locked in DTLB.
    @@ -48,5 +49,5 @@
                      */
                     dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
    -                dtlb_insert_mapping((__address) THREAD->kstack, (__address) THREAD->kstack, PAGESIZE_8K, true, true);
    +                dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
             }       
     }
    @@ -57,7 +58,7 @@
             __address base;
     
    -        base = ALIGN_DOWN(config.base, 4*1024*1024);
    +        base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
     
    -        if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
    +        if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
                     /*
                      * Kernel stack of this thread is locked in DTLB.
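
    For the record, the sparc64 change is a pure renaming: with the new
    KERNEL_PAGE_WIDTH = 22, 1 << 22 == 4*1024*1024, so the alignment and range
    checks are numerically unchanged. An illustrative compile-time check
    (assuming C11 _Static_assert; not part of the commit):

        #define KERNEL_PAGE_WIDTH 22
        _Static_assert((1 << KERNEL_PAGE_WIDTH) == 4*1024*1024, "still 4M");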