Changeset ba50a34 in mainline


Ignore:
Timestamp:
2009-12-06T18:29:57Z (15 years ago)
Author:
Pavel Rimsky <pavel@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
5e53e02
Parents:
eb79d60
Message:

Merged the fast instruction access MMU miss handler; the first few instructions of userspace tasks can now be reached.

Location:
kernel
Files:
1 added
7 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/sparc64/include/mm/sun4v/frame.h

    reb79d60 rba50a34  
    4444#define MMU_FRAME_SIZE          (1 << MMU_FRAME_WIDTH)
    4545
    46 /*
    47  * Page size exported to the generic memory management subsystems.
    48  * This page size is not directly supported by the MMU, but we can emulate
    49  * each 16K page with a pair of adjacent 8K pages.
    50  */
    51 #define FRAME_WIDTH             14      /* 16K */
     46#define FRAME_WIDTH             13
    5247#define FRAME_SIZE              (1 << FRAME_WIDTH)
    5348
  • kernel/arch/sparc64/include/mm/tlb.h

    reb79d60 rba50a34  
    3636#define KERN_sparc64_TLB_H_
    3737
    38 #if defined (US)
    39 #define ITLB_ENTRY_COUNT                64
    40 #define DTLB_ENTRY_COUNT                64
    41 #define DTLB_MAX_LOCKED_ENTRIES         DTLB_ENTRY_COUNT
     38#if defined (SUN4U)
     39#include <arch/mm/sun4u/tlb.h>
     40#elif defined (SUN4V)
     41#include <arch/mm/sun4v/tlb.h>
    4242#endif
    43 
    44 /** TLB_DSMALL is the only of the three DMMUs that can hold locked entries. */
    45 #if defined (US3)
    46 #define DTLB_MAX_LOCKED_ENTRIES         16
    47 #endif
    48 
    49 #define MEM_CONTEXT_KERNEL              0
    50 #define MEM_CONTEXT_TEMP                1
    51 
    52 /** Page sizes. */
    53 #define PAGESIZE_8K     0
    54 #define PAGESIZE_64K    1
    55 #define PAGESIZE_512K   2
    56 #define PAGESIZE_4M     3
    57 
    58 /** Bit width of the TLB-locked portion of kernel address space. */
    59 #define KERNEL_PAGE_WIDTH       22      /* 4M */
    60 
    61 /* TLB Demap Operation types. */
    62 #define TLB_DEMAP_PAGE          0
    63 #define TLB_DEMAP_CONTEXT       1
    64 #if defined (US3)
    65 #define TLB_DEMAP_ALL           2
    66 #endif
    67 
    68 #define TLB_DEMAP_TYPE_SHIFT    6
    69 
    70 /* TLB Demap Operation Context register encodings. */
    71 #define TLB_DEMAP_PRIMARY       0
    72 #define TLB_DEMAP_SECONDARY     1
    73 #define TLB_DEMAP_NUCLEUS       2
    74 
    75 /* There are more TLBs in one MMU in US3, their codes are defined here. */
    76 #if defined (US3)
    77 /* D-MMU: one small (16-entry) TLB and two big (512-entry) TLBs */
    78 #define TLB_DSMALL      0
    79 #define TLB_DBIG_0      2
    80 #define TLB_DBIG_1      3
    81        
    82 /* I-MMU: one small (16-entry) TLB and one big TLB */
    83 #define TLB_ISMALL      0
    84 #define TLB_IBIG        2
    85 #endif
    86 
    87 #define TLB_DEMAP_CONTEXT_SHIFT 4
    88 
    89 /* TLB Tag Access shifts */
    90 #define TLB_TAG_ACCESS_CONTEXT_SHIFT    0
    91 #define TLB_TAG_ACCESS_CONTEXT_MASK     ((1 << 13) - 1)
    92 #define TLB_TAG_ACCESS_VPN_SHIFT        13
    93 
    94 #ifndef __ASM__
    95 
    96 #include <arch/mm/tte.h>
    97 #include <arch/mm/mmu.h>
    98 #include <arch/mm/page.h>
    99 #include <arch/asm.h>
    100 #include <arch/barrier.h>
    101 #include <arch/types.h>
    102 #include <arch/register.h>
    103 #include <arch/cpu.h>
    104 
    105 union tlb_context_reg {
    106         uint64_t v;
    107         struct {
    108                 unsigned long : 51;
    109                 unsigned context : 13;          /**< Context/ASID. */
    110         } __attribute__ ((packed));
    111 };
    112 typedef union tlb_context_reg tlb_context_reg_t;
    113 
    114 /** I-/D-TLB Data In/Access Register type. */
    115 typedef tte_data_t tlb_data_t;
    116 
    117 /** I-/D-TLB Data Access Address in Alternate Space. */
    118 
    119 #if defined (US)
    120 
    121 union tlb_data_access_addr {
    122         uint64_t value;
    123         struct {
    124                 uint64_t : 55;
    125                 unsigned tlb_entry : 6;
    126                 unsigned : 3;
    127         } __attribute__ ((packed));
    128 };
    129 typedef union tlb_data_access_addr dtlb_data_access_addr_t;
    130 typedef union tlb_data_access_addr dtlb_tag_read_addr_t;
    131 typedef union tlb_data_access_addr itlb_data_access_addr_t;
    132 typedef union tlb_data_access_addr itlb_tag_read_addr_t;
    133 
    134 #elif defined (US3)
    135 
    136 /*
    137  * In US3, I-MMU and D-MMU have different formats of the data
    138  * access register virtual address. In the corresponding
    139  * structures the member variable for the entry number is
    140  * called "local_tlb_entry" - it contrasts with the "tlb_entry"
    141  * for the US data access register VA structure. The rationale
    142  * behind this is to prevent careless mistakes in the code
    143  * caused by setting only the entry number and not the TLB
    144  * number in the US3 code (when taking the code from US).
    145  */
    146 
    147 union dtlb_data_access_addr {
    148         uint64_t value;
    149         struct {
    150                 uint64_t : 45;
    151                 unsigned : 1;
    152                 unsigned tlb_number : 2;
    153                 unsigned : 4;
    154                 unsigned local_tlb_entry : 9;
    155                 unsigned : 3;
    156         } __attribute__ ((packed));
    157 };
    158 typedef union dtlb_data_access_addr dtlb_data_access_addr_t;
    159 typedef union dtlb_data_access_addr dtlb_tag_read_addr_t;
    160 
    161 union itlb_data_access_addr {
    162         uint64_t value;
    163         struct {
    164                 uint64_t : 45;
    165                 unsigned : 1;
    166                 unsigned tlb_number : 2;
    167                 unsigned : 6;
    168                 unsigned local_tlb_entry : 7;
    169                 unsigned : 3;
    170         } __attribute__ ((packed));
    171 };
    172 typedef union itlb_data_access_addr itlb_data_access_addr_t;
    173 typedef union itlb_data_access_addr itlb_tag_read_addr_t;
    174 
    175 #endif
    176 
    177 /** I-/D-TLB Tag Read Register. */
    178 union tlb_tag_read_reg {
    179         uint64_t value;
    180         struct {
    181                 uint64_t vpn : 51;      /**< Virtual Address bits 63:13. */
    182                 unsigned context : 13;  /**< Context identifier. */
    183         } __attribute__ ((packed));
    184 };
    185 typedef union tlb_tag_read_reg tlb_tag_read_reg_t;
    186 typedef union tlb_tag_read_reg tlb_tag_access_reg_t;
    187 
    188 
    189 /** TLB Demap Operation Address. */
    190 union tlb_demap_addr {
    191         uint64_t value;
    192         struct {
    193                 uint64_t vpn: 51;       /**< Virtual Address bits 63:13. */
    194 #if defined (US)
    195                 unsigned : 6;           /**< Ignored. */
    196                 unsigned type : 1;      /**< The type of demap operation. */
    197 #elif defined (US3)
    198                 unsigned : 5;           /**< Ignored. */
    199                 unsigned type: 2;       /**< The type of demap operation. */
    200 #endif
    201                 unsigned context : 2;   /**< Context register selection. */
    202                 unsigned : 4;           /**< Zero. */
    203         } __attribute__ ((packed));
    204 };
    205 typedef union tlb_demap_addr tlb_demap_addr_t;
    206 
    207 /** TLB Synchronous Fault Status Register. */
    208 union tlb_sfsr_reg {
    209         uint64_t value;
    210         struct {
    211 #if defined (US)
    212                 unsigned long : 40;     /**< Implementation dependent. */
    213                 unsigned asi : 8;       /**< ASI. */
    214                 unsigned : 2;
    215                 unsigned ft : 7;        /**< Fault type. */
    216 #elif defined (US3)
    217                 unsigned long : 39;     /**< Implementation dependent. */
    218                 unsigned nf : 1;        /**< Non-faulting load. */
    219                 unsigned asi : 8;       /**< ASI. */
    220                 unsigned tm : 1;        /**< I-TLB miss. */
    221                 unsigned : 3;           /**< Reserved. */
    222                 unsigned ft : 5;        /**< Fault type. */
    223 #endif
    224                 unsigned e : 1;         /**< Side-effect bit. */
    225                 unsigned ct : 2;        /**< Context Register selection. */
    226                 unsigned pr : 1;        /**< Privilege bit. */
    227                 unsigned w : 1;         /**< Write bit. */
    228                 unsigned ow : 1;        /**< Overwrite bit. */
    229                 unsigned fv : 1;        /**< Fault Valid bit. */
    230         } __attribute__ ((packed));
    231 };
    232 typedef union tlb_sfsr_reg tlb_sfsr_reg_t;
    233 
    234 #if defined (US3)
    235 
    236 /*
    237  * Functions for determining the number of entries in TLBs. They either return
    238  * a constant value or a value based on the CPU autodetection.
    239  */
    240 
    241 /**
    242  * Determine the number of entries in the DMMU's small TLB.
    243  */
    244 static inline uint16_t tlb_dsmall_size(void)
    245 {
    246         return 16;
    247 }
    248 
    249 /**
    250  * Determine the number of entries in each DMMU's big TLB.
    251  */
    252 static inline uint16_t tlb_dbig_size(void)
    253 {
    254         return 512;
    255 }
    256 
    257 /**
    258  * Determine the number of entries in the IMMU's small TLB.
    259  */
    260 static inline uint16_t tlb_ismall_size(void)
    261 {
    262         return 16;
    263 }
    264 
    265 /**
    266  * Determine the number of entries in the IMMU's big TLB.
    267  */
    268 static inline uint16_t tlb_ibig_size(void)
    269 {
    270         if (((ver_reg_t) ver_read()).impl == IMPL_ULTRASPARCIV_PLUS)
    271                 return 512;
    272         else
    273                 return 128;
    274 }
    275 
    276 #endif
    277 
    278 /** Read MMU Primary Context Register.
    279  *
    280  * @return              Current value of Primary Context Register.
    281  */
    282 static inline uint64_t mmu_primary_context_read(void)
    283 {
    284         return asi_u64_read(ASI_DMMU, VA_PRIMARY_CONTEXT_REG);
    285 }
    286 
    287 /** Write MMU Primary Context Register.
    288  *
    289  * @param v             New value of Primary Context Register.
    290  */
    291 static inline void mmu_primary_context_write(uint64_t v)
    292 {
    293         asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);
    294         flush_pipeline();
    295 }
    296 
    297 /** Read MMU Secondary Context Register.
    298  *
    299  * @return              Current value of Secondary Context Register.
    300  */
    301 static inline uint64_t mmu_secondary_context_read(void)
    302 {
    303         return asi_u64_read(ASI_DMMU, VA_SECONDARY_CONTEXT_REG);
    304 }
    305 
    306 /** Write MMU Primary Context Register.
    307  *
    308  * @param v             New value of Primary Context Register.
    309  */
    310 static inline void mmu_secondary_context_write(uint64_t v)
    311 {
    312         asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);
    313         flush_pipeline();
    314 }
    315 
    316 #if defined (US)
    317 
    318 /** Read IMMU TLB Data Access Register.
    319  *
    320  * @param entry         TLB Entry index.
    321  *
    322  * @return              Current value of specified IMMU TLB Data Access
    323  *                      Register.
    324  */
    325 static inline uint64_t itlb_data_access_read(size_t entry)
    326 {
    327         itlb_data_access_addr_t reg;
    328        
    329         reg.value = 0;
    330         reg.tlb_entry = entry;
    331         return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
    332 }
    333 
    334 /** Write IMMU TLB Data Access Register.
    335  *
    336  * @param entry         TLB Entry index.
    337  * @param value         Value to be written.
    338  */
    339 static inline void itlb_data_access_write(size_t entry, uint64_t value)
    340 {
    341         itlb_data_access_addr_t reg;
    342        
    343         reg.value = 0;
    344         reg.tlb_entry = entry;
    345         asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
    346         flush_pipeline();
    347 }
    348 
    349 /** Read DMMU TLB Data Access Register.
    350  *
    351  * @param entry         TLB Entry index.
    352  *
    353  * @return              Current value of specified DMMU TLB Data Access
    354  *                      Register.
    355  */
    356 static inline uint64_t dtlb_data_access_read(size_t entry)
    357 {
    358         dtlb_data_access_addr_t reg;
    359        
    360         reg.value = 0;
    361         reg.tlb_entry = entry;
    362         return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
    363 }
    364 
    365 /** Write DMMU TLB Data Access Register.
    366  *
    367  * @param entry         TLB Entry index.
    368  * @param value         Value to be written.
    369  */
    370 static inline void dtlb_data_access_write(size_t entry, uint64_t value)
    371 {
    372         dtlb_data_access_addr_t reg;
    373        
    374         reg.value = 0;
    375         reg.tlb_entry = entry;
    376         asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
    377         membar();
    378 }
    379 
    380 /** Read IMMU TLB Tag Read Register.
    381  *
    382  * @param entry         TLB Entry index.
    383  *
    384  * @return              Current value of specified IMMU TLB Tag Read Register.
    385  */
    386 static inline uint64_t itlb_tag_read_read(size_t entry)
    387 {
    388         itlb_tag_read_addr_t tag;
    389 
    390         tag.value = 0;
    391         tag.tlb_entry = entry;
    392         return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
    393 }
    394 
    395 /** Read DMMU TLB Tag Read Register.
    396  *
    397  * @param entry         TLB Entry index.
    398  *
    399  * @return              Current value of specified DMMU TLB Tag Read Register.
    400  */
    401 static inline uint64_t dtlb_tag_read_read(size_t entry)
    402 {
    403         dtlb_tag_read_addr_t tag;
    404 
    405         tag.value = 0;
    406         tag.tlb_entry = entry;
    407         return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
    408 }
    409 
    410 #elif defined (US3)
    411 
    412 
    413 /** Read IMMU TLB Data Access Register.
    414  *
    415  * @param tlb           TLB number (one of TLB_ISMALL or TLB_IBIG)
    416  * @param entry         TLB Entry index.
    417  *
    418  * @return              Current value of specified IMMU TLB Data Access
    419  *                      Register.
    420  */
    421 static inline uint64_t itlb_data_access_read(int tlb, size_t entry)
    422 {
    423         itlb_data_access_addr_t reg;
    424        
    425         reg.value = 0;
    426         reg.tlb_number = tlb;
    427         reg.local_tlb_entry = entry;
    428         return asi_u64_read(ASI_ITLB_DATA_ACCESS_REG, reg.value);
    429 }
    430 
    431 /** Write IMMU TLB Data Access Register.
    432  * @param tlb           TLB number (one of TLB_ISMALL or TLB_IBIG)
    433  * @param entry         TLB Entry index.
    434  * @param value         Value to be written.
    435  */
    436 static inline void itlb_data_access_write(int tlb, size_t entry,
    437         uint64_t value)
    438 {
    439         itlb_data_access_addr_t reg;
    440        
    441         reg.value = 0;
    442         reg.tlb_number = tlb;
    443         reg.local_tlb_entry = entry;
    444         asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
    445         flush_pipeline();
    446 }
    447 
    448 /** Read DMMU TLB Data Access Register.
    449  *
    450  * @param tlb           TLB number (one of TLB_DSMALL, TLB_DBIG, TLB_DBIG)
    451  * @param entry         TLB Entry index.
    452  *
    453  * @return              Current value of specified DMMU TLB Data Access
    454  *                      Register.
    455  */
    456 static inline uint64_t dtlb_data_access_read(int tlb, size_t entry)
    457 {
    458         dtlb_data_access_addr_t reg;
    459        
    460         reg.value = 0;
    461         reg.tlb_number = tlb;
    462         reg.local_tlb_entry = entry;
    463         return asi_u64_read(ASI_DTLB_DATA_ACCESS_REG, reg.value);
    464 }
    465 
    466 /** Write DMMU TLB Data Access Register.
    467  *
    468  * @param tlb           TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1) 
    469  * @param entry         TLB Entry index.
    470  * @param value         Value to be written.
    471  */
    472 static inline void dtlb_data_access_write(int tlb, size_t entry,
    473         uint64_t value)
    474 {
    475         dtlb_data_access_addr_t reg;
    476        
    477         reg.value = 0;
    478         reg.tlb_number = tlb;
    479         reg.local_tlb_entry = entry;
    480         asi_u64_write(ASI_DTLB_DATA_ACCESS_REG, reg.value, value);
    481         membar();
    482 }
    483 
    484 /** Read IMMU TLB Tag Read Register.
    485  *
    486  * @param tlb           TLB number (one of TLB_ISMALL or TLB_IBIG)
    487  * @param entry         TLB Entry index.
    488  *
    489  * @return              Current value of specified IMMU TLB Tag Read Register.
    490  */
    491 static inline uint64_t itlb_tag_read_read(int tlb, size_t entry)
    492 {
    493         itlb_tag_read_addr_t tag;
    494 
    495         tag.value = 0;
    496         tag.tlb_number = tlb;
    497         tag.local_tlb_entry = entry;
    498         return asi_u64_read(ASI_ITLB_TAG_READ_REG, tag.value);
    499 }
    500 
    501 /** Read DMMU TLB Tag Read Register.
    502  *
    503  * @param tlb           TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1)
    504  * @param entry         TLB Entry index.
    505  *
    506  * @return              Current value of specified DMMU TLB Tag Read Register.
    507  */
    508 static inline uint64_t dtlb_tag_read_read(int tlb, size_t entry)
    509 {
    510         dtlb_tag_read_addr_t tag;
    511 
    512         tag.value = 0;
    513         tag.tlb_number = tlb;
    514         tag.local_tlb_entry = entry;
    515         return asi_u64_read(ASI_DTLB_TAG_READ_REG, tag.value);
    516 }
    517 
    518 #endif
    519 
    520 
    521 /** Write IMMU TLB Tag Access Register.
    522  *
    523  * @param v             Value to be written.
    524  */
    525 static inline void itlb_tag_access_write(uint64_t v)
    526 {
    527         asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);
    528         flush_pipeline();
    529 }
    530 
    531 /** Read IMMU TLB Tag Access Register.
    532  *
    533  * @return              Current value of IMMU TLB Tag Access Register.
    534  */
    535 static inline uint64_t itlb_tag_access_read(void)
    536 {
    537         return asi_u64_read(ASI_IMMU, VA_IMMU_TAG_ACCESS);
    538 }
    539 
    540 /** Write DMMU TLB Tag Access Register.
    541  *
    542  * @param v             Value to be written.
    543  */
    544 static inline void dtlb_tag_access_write(uint64_t v)
    545 {
    546         asi_u64_write(ASI_DMMU, VA_DMMU_TAG_ACCESS, v);
    547         membar();
    548 }
    549 
    550 /** Read DMMU TLB Tag Access Register.
    551  *
    552  * @return              Current value of DMMU TLB Tag Access Register.
    553  */
    554 static inline uint64_t dtlb_tag_access_read(void)
    555 {
    556         return asi_u64_read(ASI_DMMU, VA_DMMU_TAG_ACCESS);
    557 }
    558 
    559 
    560 /** Write IMMU TLB Data in Register.
    561  *
    562  * @param v             Value to be written.
    563  */
    564 static inline void itlb_data_in_write(uint64_t v)
    565 {
    566         asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);
    567         flush_pipeline();
    568 }
    569 
    570 /** Write DMMU TLB Data in Register.
    571  *
    572  * @param v             Value to be written.
    573  */
    574 static inline void dtlb_data_in_write(uint64_t v)
    575 {
    576         asi_u64_write(ASI_DTLB_DATA_IN_REG, 0, v);
    577         membar();
    578 }
    579 
    580 /** Read ITLB Synchronous Fault Status Register.
    581  *
    582  * @return              Current content of I-SFSR register.
    583  */
    584 static inline uint64_t itlb_sfsr_read(void)
    585 {
    586         return asi_u64_read(ASI_IMMU, VA_IMMU_SFSR);
    587 }
    588 
    589 /** Write ITLB Synchronous Fault Status Register.
    590  *
    591  * @param v             New value of I-SFSR register.
    592  */
    593 static inline void itlb_sfsr_write(uint64_t v)
    594 {
    595         asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);
    596         flush_pipeline();
    597 }
    598 
    599 /** Read DTLB Synchronous Fault Status Register.
    600  *
    601  * @return              Current content of D-SFSR register.
    602  */
    603 static inline uint64_t dtlb_sfsr_read(void)
    604 {
    605         return asi_u64_read(ASI_DMMU, VA_DMMU_SFSR);
    606 }
    607 
    608 /** Write DTLB Synchronous Fault Status Register.
    609  *
    610  * @param v             New value of D-SFSR register.
    611  */
    612 static inline void dtlb_sfsr_write(uint64_t v)
    613 {
    614         asi_u64_write(ASI_DMMU, VA_DMMU_SFSR, v);
    615         membar();
    616 }
    617 
    618 /** Read DTLB Synchronous Fault Address Register.
    619  *
    620  * @return              Current content of D-SFAR register.
    621  */
    622 static inline uint64_t dtlb_sfar_read(void)
    623 {
    624         return asi_u64_read(ASI_DMMU, VA_DMMU_SFAR);
    625 }
    626 
    627 /** Perform IMMU TLB Demap Operation.
    628  *
    629  * @param type          Selects between context and page demap (and entire MMU
    630  *                      demap on US3).
    631  * @param context_encoding Specifies which Context register has Context ID for
    632  *                      demap.
    633  * @param page          Address which is on the page to be demapped.
    634  */
    635 static inline void itlb_demap(int type, int context_encoding, uintptr_t page)
    636 {
    637         tlb_demap_addr_t da;
    638         page_address_t pg;
    639        
    640         da.value = 0;
    641         pg.address = page;
    642        
    643         da.type = type;
    644         da.context = context_encoding;
    645         da.vpn = pg.vpn;
    646        
    647         /* da.value is the address within the ASI */
    648         asi_u64_write(ASI_IMMU_DEMAP, da.value, 0);
    649 
    650         flush_pipeline();
    651 }
    652 
    653 /** Perform DMMU TLB Demap Operation.
    654  *
    655  * @param type          Selects between context and page demap (and entire MMU
    656  *                      demap on US3).
    657  * @param context_encoding Specifies which Context register has Context ID for
    658  *                      demap.
    659  * @param page          Address which is on the page to be demapped.
    660  */
    661 static inline void dtlb_demap(int type, int context_encoding, uintptr_t page)
    662 {
    663         tlb_demap_addr_t da;
    664         page_address_t pg;
    665        
    666         da.value = 0;
    667         pg.address = page;
    668        
    669         da.type = type;
    670         da.context = context_encoding;
    671         da.vpn = pg.vpn;
    672        
    673         /* da.value is the address within the ASI */
    674         asi_u64_write(ASI_DMMU_DEMAP, da.value, 0);
    675 
    676         membar();
    677 }
    678 
    679 extern void fast_instruction_access_mmu_miss(unative_t, istate_t *);
    680 extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t, istate_t *);
    681 extern void fast_data_access_protection(tlb_tag_access_reg_t , istate_t *);
    682 
    683 extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool);
    684 
    685 extern void dump_sfsr_and_sfar(void);
    686 
    687 #endif /* !def __ASM__ */
    68843
    68944#endif
  • kernel/arch/sparc64/src/mm/sun4v/as.c

    reb79d60 rba50a34  
    6161{
    6262#ifdef CONFIG_TSB
    63         /*
    64          * The order must be calculated with respect to the emulated
    65          * 16K page size.
    66          */
    67         int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
    68             sizeof(tsb_entry_t)) >> FRAME_WIDTH);
     63        int order = fnzb32(
     64                (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH);
    6965
    70         uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
     66        uintptr_t tsb = (uintptr_t) frame_alloc(order, flags);
    7167
    7268        if (!tsb)
    7369                return -1;
    7470
    75         as->arch.itsb = (tsb_entry_t *) tsb;
    76         as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
    77             sizeof(tsb_entry_t));
     71        as->arch.tsb_description.page_size = PAGESIZE_8K;
     72        as->arch.tsb_description.associativity = 1;
     73        as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT;
     74        as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K;
     75        as->arch.tsb_description.tsb_base = tsb;
     76        as->arch.tsb_description.reserved = 0;
     77        as->arch.tsb_description.context = 0;
    7878
    79         memsetb(as->arch.itsb,
    80             (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
     79        memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base),
     80                TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0);
    8181#endif
    8282        return 0;
     
    8686{
    8787#ifdef CONFIG_TSB
    88         /*
    89          * The count must be calculated with respect to the emualted 16K page
    90          * size.
    91          */
    92         size_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
    93             sizeof(tsb_entry_t)) >> FRAME_WIDTH;
    94         frame_free(KA2PA((uintptr_t) as->arch.itsb));
     88        count_t cnt = (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) >> FRAME_WIDTH;
     89        frame_free((uintptr_t) as->arch.tsb_description.tsb_base);
    9590        return cnt;
    9691#else
     
    116111void as_install_arch(as_t *as)
    117112{
    118 #if 0
    119         tlb_context_reg_t ctx;
    120        
    121         /*
    122          * Note that we don't and may not lock the address space. That's ok
    123          * since we only read members that are currently read-only.
    124          *
    125          * Moreover, the as->asid is protected by asidlock, which is being held.
    126          */
    127        
    128         /*
    129          * Write ASID to secondary context register. The primary context
    130          * register has to be set from TL>0 so it will be filled from the
    131          * secondary context register from the TL=1 code just before switch to
    132          * userspace.
    133          */
    134         ctx.v = 0;
    135         ctx.context = as->asid;
    136         mmu_secondary_context_write(ctx.v);
    137 
    138 #ifdef CONFIG_TSB       
    139         uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
    140 
    141         ASSERT(as->arch.itsb && as->arch.dtsb);
    142 
    143         uintptr_t tsb = (uintptr_t) as->arch.itsb;
    144                
    145         if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
    146                 /*
    147                  * TSBs were allocated from memory not covered
    148                  * by the locked 4M kernel DTLB entry. We need
    149                  * to map both TSBs explicitly.
    150                  */
    151                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    152                 dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
    153         }
    154                
    155         /*
    156          * Setup TSB Base registers.
    157          */
    158         tsb_base_reg_t tsb_base;
    159                
    160         tsb_base.value = 0;
    161         tsb_base.size = TSB_SIZE;
    162         tsb_base.split = 0;
    163 
    164         tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
    165         itsb_base_write(tsb_base.value);
    166         tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
    167         dtsb_base_write(tsb_base.value);
    168        
    169 #if defined (US3)
    170         /*
    171          * Clear the extension registers.
    172          * In HelenOS, primary and secondary context registers contain
    173          * equal values and kernel misses (context 0, ie. the nucleus context)
    174          * are excluded from the TSB miss handler, so it makes no sense
    175          * to have separate TSBs for primary, secondary and nucleus contexts.
    176          * Clearing the extension registers will ensure that the value of the
    177          * TSB Base register will be used as an address of TSB, making the code
    178          * compatible with the US port.
    179          */
    180         itsb_primary_extension_write(0);
    181         itsb_nucleus_extension_write(0);
    182         dtsb_primary_extension_write(0);
    183         dtsb_secondary_extension_write(0);
    184         dtsb_nucleus_extension_write(0);
    185 #endif
    186 #endif
    187 #endif
     113        mmu_secondary_context_write(as->asid);
    188114}
    189115
  • kernel/arch/sparc64/src/mm/sun4v/tlb.c

    reb79d60 rba50a34  
    6060#endif
    6161
    62 static void dtlb_pte_copy(pte_t *, size_t, bool);
    63 static void itlb_pte_copy(pte_t *, size_t);
     62//static void dtlb_pte_copy(pte_t *, size_t, bool);
     63static void itlb_pte_copy(pte_t *);
    6464static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
    65 static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
    66     const char *);
    67 static void do_fast_data_access_protection_fault(istate_t *,
    68     tlb_tag_access_reg_t, const char *);
     65//static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
     66//    const char *);
     67//static void do_fast_data_access_protection_fault(istate_t *,
     68//    uint64_t, const char *);
    6969
    7070char *context_encoding[] = {
     
    132132}
    133133
     134#if 0
    134135/** Copy PTE to TLB.
    135136 *
     
    141142void dtlb_pte_copy(pte_t *t, size_t index, bool ro)
    142143{
    143 #if 0
    144144        tlb_tag_access_reg_t tag;
    145145        tlb_data_t data;
     
    170170
    171171        dtlb_data_in_write(data.value);
    172 #endif
    173 }
     172}
     173#endif
    174174
    175175/** Copy PTE to ITLB.
    176176 *
    177177 * @param t             Page Table Entry to be copied.
    178  * @param index         Zero if lower 8K-subpage, one if higher 8K-subpage.
    179  */
    180 void itlb_pte_copy(pte_t *t, size_t index)
    181 {
    182 #if 0
    183         tlb_tag_access_reg_t tag;
    184         tlb_data_t data;
    185         page_address_t pg;
    186         frame_address_t fr;
    187 
    188         pg.address = t->page + (index << MMU_PAGE_WIDTH);
    189         fr.address = t->frame + (index << MMU_PAGE_WIDTH);
    190 
    191         tag.value = 0;
    192         tag.context = t->as->asid;
    193         tag.vpn = pg.vpn;
    194        
    195         itlb_tag_access_write(tag.value);
     178 */
     179void itlb_pte_copy(pte_t *t)
     180{
     181        tte_data_t data;
    196182       
    197183        data.value = 0;
    198184        data.v = true;
     185        data.nfo = false;
     186        data.ra = (t->frame) >> FRAME_WIDTH;
     187        data.ie = false;
     188        data.e = false;
     189        data.cp = t->c;
     190        data.cv = false;
     191        data.p = t->k;
     192        data.x = true;
     193        data.w = false;
    199194        data.size = PAGESIZE_8K;
    200         data.pfn = fr.pfn;
    201         data.l = false;
    202         data.cp = t->c;
    203         data.p = t->k;          /* p like privileged */
    204         data.w = false;
    205         data.g = t->g;
    206195       
    207         itlb_data_in_write(data.value);
    208 #endif
     196        __hypercall_hyperfast(
     197                t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
    209198}
    210199
     
    212201void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
    213202{
    214         asm volatile ("sethi 0x41906, %g0");
    215         uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    216         size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
     203        uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    217204        pte_t *t;
    218205
    219206        page_table_lock(AS, true);
    220         t = page_mapping_find(AS, page_16k);
     207        t = page_mapping_find(AS, va);
     208
    221209        if (t && PTE_EXECUTABLE(t)) {
    222210                /*
     
    225213                 */
    226214                t->a = true;
    227                 itlb_pte_copy(t, index);
     215                itlb_pte_copy(t);
    228216#ifdef CONFIG_TSB
    229                 itsb_pte_copy(t, index);
     217                itsb_pte_copy(t);
    230218#endif
    231219                page_table_unlock(AS, true);
     
    236224                 */             
    237225                page_table_unlock(AS, true);
    238                 if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
    239                     AS_PF_FAULT) {
     226                if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
    240227                        do_fast_instruction_access_mmu_miss_fault(istate,
    241228                            __func__);
     
    248235 * Note that some faults (e.g. kernel faults) were already resolved by the
    249236 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
     237 *
     238 * @param page_and_ctx  A 64-bit value describing the fault. The most
     239 *                      significant 51 bits of the value contain the virtual
     240 *                      address which caused the fault truncated to the page
     241 *                      boundary. The least significant 13 bits of the value
     242 *                      contain the number of the context in which the fault
     243 *                      occurred.
     244 * @param istate        Interrupted state saved on the stack.
     245 */
     246void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
     247{
     248#if 0
     249        pte_t *t;
     250        uintptr_t va = DMISS_ADDRESS(page_and_ctx);
     251        uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
     252
     253        if (ctx == ASID_KERNEL) {
     254                if (va == 0) {
     255                        /* NULL access in kernel */
     256                        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
     257                            __func__);
     258                }
     259                do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
     260                    "kernel page fault.");
     261        }
     262
     263        page_table_lock(AS, true);
     264        t = page_mapping_find(AS, va);
     265        if (t) {
     266                /*
     267                 * The mapping was found in the software page hash table.
     268                 * Insert it into DTLB.
     269                 */
     270                t->a = true;
     271                dtlb_pte_copy(t, true);
     272#ifdef CONFIG_TSB
     273                dtsb_pte_copy(t, true);
     274#endif
     275                page_table_unlock(AS, true);
     276        } else {
     277                /*
     278                 * Forward the page fault to the address space page fault
     279                 * handler.
     280                 */             
     281                page_table_unlock(AS, true);
     282                if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
     283                        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
     284                            __func__);
     285                }
     286        }
     287#endif
     288}
     289
     290/** DTLB protection fault handler.
    250291 *
    251292 * @param tag           Content of the TLB Tag Access register as it existed
     
    255296 * @param istate        Interrupted state saved on the stack.
    256297 */
    257 void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
    258 {
    259         uintptr_t page_8k;
    260         uintptr_t page_16k;
    261         size_t index;
     298void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
     299{
     300#if 0
    262301        pte_t *t;
    263302
    264         page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
    265         page_16k = ALIGN_DOWN(page_8k, PAGE_SIZE);
    266         index = tag.vpn % MMU_PAGES_PER_PAGE;
    267 
    268         if (tag.context == ASID_KERNEL) {
    269                 if (!tag.vpn) {
    270                         /* NULL access in kernel */
    271                         do_fast_data_access_mmu_miss_fault(istate, tag,
    272                             __func__);
    273 //MH
    274                 } else {
    275 //              } else if (page_8k >= end_of_identity) {
    276                         /*
    277                          * The kernel is accessing the I/O space.
    278                          * We still do identity mapping for I/O,
    279                          * but without caching.
    280                          */
    281                         dtlb_insert_mapping(page_8k, KA2PA(page_8k),
    282                             PAGESIZE_8K, false, false);
    283                         return;
    284                 }
    285                 do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
    286                     "kernel page fault.");
    287         }
     303        uintptr_t va = DMISS_ADDRESS(page_and_ctx);
     304        uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
    288305
    289306        page_table_lock(AS, true);
    290         t = page_mapping_find(AS, page_16k);
    291         if (t) {
    292                 /*
    293                  * The mapping was found in the software page hash table.
    294                  * Insert it into DTLB.
    295                  */
    296                 t->a = true;
    297                 dtlb_pte_copy(t, index, true);
    298 #ifdef CONFIG_TSB
    299                 dtsb_pte_copy(t, index, true);
    300 #endif
    301                 page_table_unlock(AS, true);
    302         } else {
    303                 /*
    304                  * Forward the page fault to the address space page fault
    305                  * handler.
    306                  */             
    307                 page_table_unlock(AS, true);
    308                 if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
    309                     AS_PF_FAULT) {
    310                         do_fast_data_access_mmu_miss_fault(istate, tag,
    311                             __func__);
    312                 }
    313         }
    314 }
    315 
    316 /** DTLB protection fault handler.
    317  *
    318  * @param tag           Content of the TLB Tag Access register as it existed
    319  *                      when the trap happened. This is to prevent confusion
    320  *                      created by clobbered Tag Access register during a nested
    321  *                      DTLB miss.
    322  * @param istate        Interrupted state saved on the stack.
    323  */
    324 void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
    325 {
    326         uintptr_t page_16k;
    327         size_t index;
    328         pte_t *t;
    329 
    330         page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
    331         index = tag.vpn % MMU_PAGES_PER_PAGE;   /* 16K-page emulation */
    332 
    333         page_table_lock(AS, true);
    334         t = page_mapping_find(AS, page_16k);
     307        t = page_mapping_find(AS, va);
    335308        if (t && PTE_WRITABLE(t)) {
    336309                /*
     
    341314                t->a = true;
    342315                t->d = true;
    343                 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
    344                     page_16k + index * MMU_PAGE_SIZE);
    345                 dtlb_pte_copy(t, index, false);
     316                mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
     317                dtlb_pte_copy(t, false);
    346318#ifdef CONFIG_TSB
    347                 dtsb_pte_copy(t, index, false);
     319                dtsb_pte_copy(t, false);
    348320#endif
    349321                page_table_unlock(AS, true);
     
    360332                }
    361333        }
     334#endif
    362335}
    363336
     
    371344 * @param d             TLB entry data
    372345 */
    373 static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
    374 {
    375 #if 0
    376         printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
    377             "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
    378             "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
    379             t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
    380             d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    381 #endif
    382 }
    383 
    384 #if defined (US)
    385 
    386 /** Print contents of both TLBs. */
    387346void tlb_print(void)
    388347{
    389         int i;
    390         tlb_data_t d;
    391         tlb_tag_read_reg_t t;
    392        
    393         printf("I-TLB contents:\n");
    394         for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
    395                 d.value = itlb_data_access_read(i);
    396                 t.value = itlb_tag_read_read(i);
    397                 print_tlb_entry(i, t, d);
    398         }
    399 
    400         printf("D-TLB contents:\n");
    401         for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
    402                 d.value = dtlb_data_access_read(i);
    403                 t.value = dtlb_tag_read_read(i);
    404                 print_tlb_entry(i, t, d);
    405         }
    406 }
    407 
    408 #elif defined (US3)
    409 
    410 /** Print contents of all TLBs. */
    411 void tlb_print(void)
    412 {
    413         int i;
    414         tlb_data_t d;
    415         tlb_tag_read_reg_t t;
    416        
    417         printf("TLB_ISMALL contents:\n");
    418         for (i = 0; i < tlb_ismall_size(); i++) {
    419                 d.value = dtlb_data_access_read(TLB_ISMALL, i);
    420                 t.value = dtlb_tag_read_read(TLB_ISMALL, i);
    421                 print_tlb_entry(i, t, d);
    422         }
    423        
    424         printf("TLB_IBIG contents:\n");
    425         for (i = 0; i < tlb_ibig_size(); i++) {
    426                 d.value = dtlb_data_access_read(TLB_IBIG, i);
    427                 t.value = dtlb_tag_read_read(TLB_IBIG, i);
    428                 print_tlb_entry(i, t, d);
    429         }
    430        
    431         printf("TLB_DSMALL contents:\n");
    432         for (i = 0; i < tlb_dsmall_size(); i++) {
    433                 d.value = dtlb_data_access_read(TLB_DSMALL, i);
    434                 t.value = dtlb_tag_read_read(TLB_DSMALL, i);
    435                 print_tlb_entry(i, t, d);
    436         }
    437        
    438         printf("TLB_DBIG_1 contents:\n");
    439         for (i = 0; i < tlb_dbig_size(); i++) {
    440                 d.value = dtlb_data_access_read(TLB_DBIG_0, i);
    441                 t.value = dtlb_tag_read_read(TLB_DBIG_0, i);
    442                 print_tlb_entry(i, t, d);
    443         }
    444        
    445         printf("TLB_DBIG_2 contents:\n");
    446         for (i = 0; i < tlb_dbig_size(); i++) {
    447                 d.value = dtlb_data_access_read(TLB_DBIG_1, i);
    448                 t.value = dtlb_tag_read_read(TLB_DBIG_1, i);
    449                 print_tlb_entry(i, t, d);
    450         }
    451 }
    452 
    453 #endif
     348        printf("Operation not possible on Niagara.\n");
     349}
    454350
    455351void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
     
    461357}
    462358
     359#if 0
    463360void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    464     tlb_tag_access_reg_t tag, const char *str)
    465 {
    466         uintptr_t va;
    467 
    468         va = tag.vpn << MMU_PAGE_WIDTH;
    469         if (tag.context) {
    470                 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
    471                     tag.context);
     361    uint64_t page_and_ctx, const char *str)
     362{
     363        if (DMISS_CONTEXT(page_and_ctx)) {
     364                fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
     365                    DMISS_CONTEXT(page_and_ctx));
    472366        }
    473367        dump_istate(istate);
     
    475369        panic("%s.", str);
    476370}
    477 
     371#endif
     372
     373#if 0
    478374void do_fast_data_access_protection_fault(istate_t *istate,
    479     tlb_tag_access_reg_t tag, const char *str)
    480 {
    481         uintptr_t va;
    482 
    483         va = tag.vpn << MMU_PAGE_WIDTH;
    484 
    485         if (tag.context) {
    486                 fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d).", str, va,
    487                     tag.context);
    488         }
    489         printf("Faulting page: %p, ASID=%d\n", va, tag.context);
     375    uint64_t page_and_ctx, const char *str)
     376{
     377        if (DMISS_CONTEXT(page_and_ctx)) {
     378                fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx),
     379                    DMISS_CONTEXT(page_and_ctx));
     380        }
     381        printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    490382        dump_istate(istate);
    491383        panic("%s.", str);
    492384}
    493 
    494 void dump_sfsr_and_sfar(void)
    495 {
    496         tlb_sfsr_reg_t sfsr;
    497         uintptr_t sfar;
    498 
    499         sfsr.value = dtlb_sfsr_read();
    500         sfar = dtlb_sfar_read();
    501        
    502 #if defined (US)
    503         printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
    504             "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
    505             sfsr.ow, sfsr.fv);
    506 #elif defined (US3)
    507         printf("DTLB SFSR: nf=%d, asi=%#x, tm=%d, ft=%#x, e=%d, ct=%d, pr=%d, "
    508             "w=%d, ow=%d, fv=%d\n", sfsr.nf, sfsr.asi, sfsr.tm, sfsr.ft,
    509             sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
    510 #endif
    511            
    512         printf("DTLB SFAR: address=%p\n", sfar);
    513        
    514         dtlb_sfsr_write(0);
     385#endif
     386
     387/**
     388 * Describes the exact condition which caused the last DMMU fault.
     389 */
     390void describe_dmmu_fault(void)
     391{
     392#if 0
     393        uint64_t myid;
     394        __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
     395
     396        ASSERT(mmu_fsas[myid].dft < 16);
     397
     398        printf("condition which caused the fault: %s\n",
     399                fault_types[mmu_fsas[myid].dft]);
     400}
     401
     402/** Invalidate all unlocked ITLB and DTLB entries. */
     403void tlb_invalidate_all(void)
     404{
     405        uint64_t errno =  __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
     406                MMU_FLAG_DTLB | MMU_FLAG_ITLB);
     407        if (errno != EOK) {
     408                panic("Error code = %d.\n", errno);
     409        }
     410#endif
    515411}
    516412
  • kernel/arch/sparc64/src/trap/exception.c

    reb79d60 rba50a34  
    162162        fault_if_from_uspace(istate, "%s.", __func__);
    163163        dump_istate(istate);
    164         dump_sfsr_and_sfar();
     164//MH
     165//      dump_sfsr_and_sfar();
    165166        panic("%s.", __func__);
    166167}
  • kernel/arch/sparc64/src/trap/sun4v/trap_table.S

    reb79d60 rba50a34  
    10921092        and %g1, NWINDOWS - 1, %g1
    10931093        wrpr %g1, 0, %cwp                       ! CWP--
    1094        
     1094
    10951095.if \is_syscall
    10961096        done
  • kernel/generic/include/errno.h

    reb79d60 rba50a34  
    5757#define EADDRNOTAVAIL   -12     /* Address not available. */
    5858#define ETIMEOUT        -13     /* Timeout expired */
     59//MH
     60#ifndef EINVAL
    5961#define EINVAL          -14     /* Invalid value */
     62#endif
     63#ifndef EBUSY
    6064#define EBUSY           -15     /* Resource is busy */
     65#endif
    6166#define EOVERFLOW       -16     /* The result does not fit its size. */
    6267#define EINTR           -17     /* Operation was interrupted. */
Note: See TracChangeset for help on using the changeset viewer.