Changeset 57da95c in mainline


Ignore:
Timestamp:
2006-09-18T11:47:28Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
29b2bbf
Parents:
f1d1f5d3
Message:
  • Create a dedicated slab cache for as_t objects and switch from malloc/free to slab_alloc/slab_free for them.

  • Slightly fix and improve both the kernel and userspace atomic_add() on sparc64.
  • More TSB work on the sparc64 front.
Files:
14 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/amd64/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4949
    5050#define as_install_arch(as)
     51#define as_deinstall_arch(as)
    5152#define as_invalidate_translation_cache(as, page, cnt)
    5253
  • kernel/arch/ia32/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4949
    5050#define as_install_arch(as)
     51#define as_deinstall_arch(as)
    5152#define as_invalidate_translation_cache(as, page, cnt)
    5253
  • kernel/arch/ia64/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4848} as_arch_t;
    4949
     50#define as_deinstall_arch(as)
    5051#define as_invalidate_translation_cache(as, page, cnt)
    5152
  • kernel/arch/mips32/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4848} as_arch_t;
    4949
     50#define as_deinstall_arch(as)
    5051#define as_invalidate_translation_cache(as, page, cnt)
    5152
  • kernel/arch/ppc32/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4848} as_arch_t;
    4949
     50#define as_deinstall_arch(as)
    5051#define as_invalidate_translation_cache(as, page, cnt)
    5152
  • kernel/arch/ppc64/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4949
    5050#define as_install_arch(as)
     51#define as_deinstall_arch(as)
    5152#define as_invalidate_translation_cache(as, page, cnt)
    5253
  • kernel/arch/sparc64/include/atomic.h

    rf1d1f5d3 r57da95c  
    5757                a = *((uint64_t *) x);
    5858                b = a + i;
    59                 __asm__ volatile ("casx %0, %1, %2\n": "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b));
     59                __asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
    6060        } while (a != b);
    6161
  • kernel/arch/sparc64/include/mm/tsb.h

    rf1d1f5d3 r57da95c  
    3737
    3838#include <arch/mm/tte.h>
     39#include <arch/mm/mmu.h>
    3940#include <arch/types.h>
    4041#include <typedefs.h>
     
    4748 * in TLBs - only one TLB entry will do.
    4849 */
    49 #define ITSB_ENTRY_COUNT                2048
    50 #define DTSB_ENTRY_COUNT                2048
     50#define TSB_SIZE                        2                       /* when changing this, change as.c as well */
     51#define ITSB_ENTRY_COUNT                (512*(1<<TSB_SIZE))
     52#define DTSB_ENTRY_COUNT                (512*(1<<TSB_SIZE))
    5153
    5254struct tsb_entry {
     
    5456        tte_data_t data;
    5557} __attribute__ ((packed));
     58typedef struct tsb_entry tsb_entry_t;
    5659
    57 typedef struct tsb_entry tsb_entry_t;
     60/** TSB Base register. */
     61union tsb_base_reg {
     62        uint64_t value;
     63        struct {
     64                uint64_t base : 51;     /**< TSB base address, bits 63:13. */
     65                unsigned split : 1;     /**< Split vs. common TSB for 8K and 64K pages.
     66                                          *  HelenOS uses only 8K pages for user mappings,
     67                                          *  so we always set this to 0.
     68                                          */
     69                unsigned : 9;
     70                unsigned size : 3;      /**< TSB size. Number of entries is 512*2^size. */
     71        } __attribute__ ((packed));
     72};
     73typedef union tsb_base_reg tsb_base_reg_t;
     74
     75/** Read ITSB Base register.
     76 *
     77 * @return Content of the ITSB Base register.
     78 */
     79static inline uint64_t itsb_base_read(void)
     80{
     81        return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE);
     82}
     83
     84/** Read DTSB Base register.
     85 *
     86 * @return Content of the DTSB Base register.
     87 */
     88static inline uint64_t dtsb_base_read(void)
     89{
     90        return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE);
     91}
     92
     93/** Write ITSB Base register.
     94 *
     95 * @param v New content of the ITSB Base register.
     96 */
     97static inline void itsb_base_write(uint64_t v)
     98{
     99        asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v);
     100}
     101
     102/** Write DTSB Base register.
     103 *
     104 * @param v New content of the DTSB Base register.
     105 */
     106static inline void dtsb_base_write(uint64_t v)
     107{
     108        asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v);
     109}
    58110
    59111extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages);
  • kernel/arch/sparc64/src/mm/as.c

    rf1d1f5d3 r57da95c  
    3737#include <genarch/mm/as_ht.h>
    3838#include <genarch/mm/asid_fifo.h>
     39#include <debug.h>
     40
     41#ifdef CONFIG_TSB
     42#include <arch/mm/tsb.h>
     43#endif
    3944
    4045/** Architecture dependent address space init. */
     
    4550}
    4651
     52/** Perform sparc64-specific tasks when an address space becomes active on the processor.
     53 *
     54 * Install ASID and map TSBs.
     55 *
     56 * @param as Address space.
     57 */
    4758void as_install_arch(as_t *as)
    4859{
    4960        tlb_context_reg_t ctx;
     61       
     62        /*
     63         * Note that we don't lock the address space.
     64         * That's correct - we can afford it here
     65         * because we only read members that are
     66         * currently read-only.
     67         */
    5068       
    5169        /*
     
    5977        ctx.context = as->asid;
    6078        mmu_secondary_context_write(ctx.v);
     79
     80#ifdef CONFIG_TSB       
     81        if (as != AS_KERNEL) {
     82                uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
     83
     84                ASSERT(as->arch.itsb && as->arch.dtsb);
     85
     86                uintptr_t tsb = as->arch.itsb;
     87               
     88                if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     89                        /*
     90                         * TSBs were allocated from memory not covered
     91                         * by the locked 4M kernel DTLB entry. We need
     92                         * to map both TSBs explicitly.
     93                         */
     94                        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
     95                        dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
     96                }
     97               
     98                /*
     99                 * Setup TSB Base registers.
     100                 */
     101                tsb_base_reg_t tsb_base;
     102               
     103                tsb_base.value = 0;
     104                tsb_base.size = TSB_SIZE;
     105                tsb_base.split = 0;
     106
     107                tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
     108                itsb_base_write(tsb_base.value);
     109                tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
     110                dtsb_base_write(tsb_base.value);
     111        }
     112#endif
     113}
     114
     115/** Perform sparc64-specific tasks when an address space is removed from the processor.
     116 *
     117 * Demap TSBs.
     118 *
     119 * @param as Address space.
     120 */
     121void as_deinstall_arch(as_t *as)
     122{
     123
     124        /*
     125         * Note that we don't lock the address space.
     126         * That's correct - we can afford it here
     127         * because we only read members that are
     128         * currently read-only.
     129         */
     130
     131#ifdef CONFIG_TSB
     132        if (as != AS_KERNEL) {
     133                uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
     134
     135                ASSERT(as->arch.itsb && as->arch.dtsb);
     136
     137                uintptr_t tsb = as->arch.itsb;
     138               
     139                if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
     140                        /*
     141                         * TSBs were allocated from memory not covered
     142                         * by the locked 4M kernel DTLB entry. We need
     143                         * to demap the entry installed by as_install_arch().
     144                         */
     145                        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
     146                }
     147               
     148        }
     149#endif
    61150}
    62151
  • kernel/arch/sparc64/src/proc/scheduler.c

    rf1d1f5d3 r57da95c  
    121121        if ((THREAD->flags & THREAD_FLAG_USPACE)) {
    122122                /*
    123                  * If this thread executes also in userspace, we have to force all
    124                  * its still-active userspace windows into the userspace window buffer
    125                  * and demap the buffer from DTLB.
     123                 * If this thread executes also in userspace, we have to
     124                 * demap the userspace window buffer from DTLB.
    126125                 */
    127126                ASSERT(THREAD->arch.uspace_window_buffer);
  • kernel/arch/xen32/include/mm/as.h

    rf1d1f5d3 r57da95c  
    4949
    5050#define as_install_arch(as)
     51#define as_deinstall_arch(as)
    5152#define as_invalidate_translation_cache(as, page, cnt)
    5253
  • kernel/generic/include/mm/as.h

    rf1d1f5d3 r57da95c  
    161161        /** Data to be used by the backend. */
    162162        mem_backend_data_t backend_data;
     163       
     164        as_arch_t arch;
    163165};
    164166
     
    193195extern void as_install_arch(as_t *as);
    194196#endif /* !def as_install_arch */
     197#ifndef as_deinstall_arch
     198extern void as_deinstall_arch(as_t *as);
     199#endif /* !def as_deinstall_arch */
    195200
    196201/* Backend declarations. */
  • kernel/generic/src/mm/as.c

    rf1d1f5d3 r57da95c  
    8585as_operations_t *as_operations = NULL;
    8686
     87/**
     88 * Slab for as_t objects.
     89 */
     90static slab_cache_t *as_slab;
     91
    8792/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
    8893SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
     
    106111{
    107112        as_arch_init();
     113       
     114        as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     115       
    108116        AS_KERNEL = as_create(FLAG_AS_KERNEL);
    109117        if (!AS_KERNEL)
     
    120128        as_t *as;
    121129
    122         as = (as_t *) malloc(sizeof(as_t), 0);
     130        as = (as_t *) slab_alloc(as_slab, 0);
    123131        link_initialize(&as->inactive_as_with_asid_link);
    124132        mutex_initialize(&as->lock);
     
    183191        interrupts_restore(ipl);
    184192       
    185         free(as);
     193        slab_free(as_slab, as);
    186194}
    187195
     
    799807                }
    800808                mutex_unlock(&old->lock);
     809
     810                /*
     811                 * Perform architecture-specific tasks when the address space
     812                 * is being removed from the CPU.
     813                 */
     814                as_deinstall_arch(old);
    801815        }
    802816
  • uspace/libc/arch/sparc64/include/atomic.h

    rf1d1f5d3 r57da95c  
    5252
    5353        do {
    54                 volatile uintptr_t x = (uint64_t) &val->count;
    55 
    56                 a = *((uint64_t *) x);
     54                a = val->count;
    5755                b = a + i;
    58                 __asm__ volatile ("casx %0, %1, %2\n": "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b));
     56                __asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
    5957        } while (a != b);
    6058
Note: See TracChangeset for help on using the changeset viewer.