/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 * 2022-12-13     WangXiaoyao  Port to new mm
 */

#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>

#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <ioremap.h>
#include <lwp_user_mm.h>
#endif

#ifndef RT_USING_SMART
#define PV_OFFSET 0
#define USER_VADDR_START 0
#endif

static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);

static void *current_mmu_table = RT_NULL;

volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];

static rt_uint8_t ASID_BITS = 0;
static rt_uint16_t next_asid;
static rt_uint64_t global_asid_generation;
#define ASID_MASK ((1 << ASID_BITS) - 1)
#define ASID_FIRST_GENERATION (1 << ASID_BITS)
#define MAX_ASID ASID_FIRST_GENERATION

static void _asid_init()
{
    rt_uint64_t satp_reg = read_csr(satp);
    satp_reg |= (((rt_uint64_t)0xffff) << PPN_BITS);
    write_csr(satp, satp_reg);
    unsigned short valid_asid_bit = ((read_csr(satp) >> PPN_BITS) & 0xffff);

    // The maximal value of ASIDLEN is 9 for Sv32 and 16 for Sv39, Sv48, and Sv57
    for (unsigned i = 0; i < 16; i++)
    {
        if (!(valid_asid_bit & 0x1))
        {
            break;
        }

        valid_asid_bit >>= 1;
        ASID_BITS++;
    }

    global_asid_generation = ASID_FIRST_GENERATION;
    next_asid = 1;
}

static rt_uint64_t _asid_check_switch(rt_aspace_t aspace)
{
    if ((aspace->asid ^ global_asid_generation) >> ASID_BITS) // not same generation
    {
        if (next_asid != MAX_ASID)
        {
            aspace->asid = global_asid_generation | next_asid;
            next_asid++;
        }
        else
        {
            // scroll to next generation
            global_asid_generation += ASID_FIRST_GENERATION;
            next_asid = 1;
            rt_hw_tlb_invalidate_all_local();

            aspace->asid = global_asid_generation | next_asid;
            next_asid++;
        }
    }

    return aspace->asid & ASID_MASK;
}
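
/*
 * Illustrative trace of the generation scheme above (assuming
 * ASID_BITS == 4, so MAX_ASID == 16 and ASID_FIRST_GENERATION == 0x10):
 *
 *   boot:                 global_asid_generation = 0x10, next_asid = 1
 *   switch to aspace A -> A->asid = 0x11, satp.ASID = 0x11 & ASID_MASK = 1
 *   switch to aspace B -> B->asid = 0x12, satp.ASID = 2
 *   ...
 *   next_asid reaches 16 -> generation rolls over to 0x20, the local TLB
 *   is flushed, and allocation restarts with satp.ASID = 1.
 *
 * Because an aspace keeps (generation | asid) packed in ->asid, any value
 * handed out in an older generation fails the XOR test in
 * _asid_check_switch() and is reassigned after the rollover.
 */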

void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    uintptr_t page_table = (uintptr_t)_rt_kmem_v2p(aspace->page_table);
    current_mmu_table = aspace->page_table;

    rt_uint64_t asid = _asid_check_switch(aspace);
    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
                        (asid << PPN_BITS) |
                        ((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
    asm volatile("sfence.vma x0,%0"::"r"(asid):"memory");
}
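
/*
 * A worked example of the satp value composed above, assuming the Sv39
 * constants from riscv_mmu.h (SATP_MODE == 8, SATP_MODE_OFFSET == 60,
 * PPN_BITS == 44, PAGE_OFFSET_BIT == 12): a root table at physical
 * 0x80200000 with asid 3 yields
 *
 *   satp = (8ul << 60) | (3ul << 44) | (0x80200000ul >> 12)
 *
 * The trailing "sfence.vma x0, asid" then drops only the stale non-global
 * translations tagged with that asid instead of flushing the whole TLB.
 */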

void *rt_hw_mmu_tbl_get()
{
    return current_mmu_table;
}

static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                         size_t attr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    l1_off = GET_L1((size_t)va);
    l2_off = GET_L2((size_t)va);
    l3_off = GET_L3((size_t)va);

    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
    }
    else
    {
        mmu_l2 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
            *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
                                 PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
        }
        else
        {
            return -1;
        }
    }

    if (PTE_USED(*(mmu_l2 + l2_off)))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
        mmu_l3 =
            (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
    }
    else
    {
        mmu_l3 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l3)
        {
            rt_memset(mmu_l3, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
            *(mmu_l2 + l2_off) =
                COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                           PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
            // declares a reference to parent page table
            rt_page_ref_inc((void *)mmu_l2, 0);
        }
        else
        {
            return -1;
        }
    }

    RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
    // declares a reference to parent page table
    rt_page_ref_inc((void *)mmu_l3, 0);
    *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
    rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
    return 0;
}

/** rt_hw_mmu_map never overrides an existing page table entry */
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO: try mapping with huge pages here
    while (npages--)
    {
        ret = _map_one_page(aspace, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo the partial map */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        return unmap_va;
    }

    return NULL;
}
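
/*
 * Usage sketch (illustrative; the attribute macro is one of the
 * MMU_MAP_* definitions from riscv_mmu.h):
 *
 *   void *va = rt_hw_mmu_map(&rt_kernel_space, v_addr, p_addr,
 *                            4 * ARCH_PAGE_SIZE, MMU_MAP_K_RWCB);
 *   if (va == RT_NULL)
 *   {
 *       // an intermediate table allocation failed; the pages mapped
 *       // before the failure were already rolled back above
 *   }
 *
 * v_addr, p_addr and size are expected to be page aligned; on success
 * the original v_addr is returned.
 */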

static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
{
    int loop_flag = 1;
    while (loop_flag)
    {
        loop_flag = 0;
        *pentry = 0;
        rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));

        // we don't handle level 0, which is maintained by the caller
        if (level > 0)
        {
            void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);

            // decrease reference from child page to parent
            rt_pages_free(page, 0);

            int free = rt_page_ref_get(page, 0);
            if (free == 1)
            {
                rt_pages_free(page, 0);
                pentry = lvl_entry[--level];
                loop_flag = 1;
            }
        }
    }
}

static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    size_t unmapped = 0;

    int i = 0;
    rt_size_t lvl_off[3];
    rt_size_t *lvl_entry[3];
    lvl_off[0] = (rt_size_t)GET_L1(loop_va);
    lvl_off[1] = (rt_size_t)GET_L2(loop_va);
    lvl_off[2] = (rt_size_t)GET_L3(loop_va);
    unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);

    rt_size_t *pentry;
    lvl_entry[i] = ((rt_size_t *)aspace->page_table + lvl_off[i]);
    pentry = lvl_entry[i];

    // find the leaf page table entry
    while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
    {
        i += 1;
        lvl_entry[i] = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
                        lvl_off[i]);
        pentry = lvl_entry[i];
        unmapped >>= ARCH_INDEX_WIDTH;
    }

    // clear the PTE and release page tables that become empty
    if (PTE_USED(*pentry))
    {
        _unmap_pte(pentry, lvl_entry, i);
    }

    return unmapped;
}

/** unlike rt_hw_mmu_map, unmap can process a region spanning multiple pages */
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    // the caller guarantees that v_addr & size are page aligned
    if (!aspace->page_table)
    {
        return;
    }
    size_t unmapped = 0;

    while (size > 0)
    {
        MM_PGTBL_LOCK(aspace);
        unmapped = _unmap_area(aspace, v_addr, size);
        MM_PGTBL_UNLOCK(aspace);

        // when unmapped == 0, the region does not exist in the page table
        if (!unmapped || unmapped > size)
            break;

        size -= unmapped;
        v_addr += unmapped;
    }
}
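
/*
 * Usage sketch (illustrative): the caller guarantees alignment, and the
 * page-table lock is taken per chunk inside the loop, so a plain call
 * suffices:
 *
 *   rt_hw_mmu_unmap(&rt_kernel_space, v_addr, 16 * ARCH_PAGE_SIZE);
 *
 * Each _unmap_area() pass removes at most one leaf (a 4 KiB page, or a
 * whole 2 MiB / 1 GiB block when the leaf sits at a higher level) and
 * reports the address range that leaf covered, which drives the loop.
 */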

#ifdef RT_USING_SMART
static inline void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
    LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start, rt_mpr_start);
}
#else
static inline void _init_region(void *vaddr, size_t size)
{
    rt_mpr_start = vaddr - rt_mpr_size;
}
#endif

int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
                       rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!aspace) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    // convert address to PPN2 index
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);

    if (va_s == 0)
    {
        return -1;
    }

    // vtable initialization check
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v)
        {
            return -1;
        }
    }

    rt_aspace_init(&rt_kernel_space, (void *)0x1000, USER_VADDR_START - 0x1000,
                   vtable);

    _init_region(v_address, size);
    return 0;
}

static const int max_level =
    (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT) / ARCH_INDEX_WIDTH;

static inline uintptr_t _get_level_size(int level)
{
    return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
}
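
/*
 * Worked sizes for Sv39 (ARCH_VADDR_WIDTH == 39, ARCH_PAGE_SHIFT == 12,
 * ARCH_INDEX_WIDTH == 9, hence max_level == 3):
 *
 *   _get_level_size(1) == 1ul << (12 + 2 * 9) == 1 GiB  (L1 block)
 *   _get_level_size(2) == 1ul << (12 + 1 * 9) == 2 MiB  (L2 block)
 *   _get_level_size(3) == 1ul << (12 + 0 * 9) == 4 KiB  (L3 leaf page)
 */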

static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    l1_off = GET_L1((rt_size_t)vaddr);
    l2_off = GET_L2((rt_size_t)vaddr);
    l3_off = GET_L3((rt_size_t)vaddr);

    if (!aspace)
    {
        LOG_W("%s: no aspace", __func__);
        return RT_NULL;
    }

    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        if (*mmu_l1 & PTE_XWR_MASK)
        {
            *level = 1;
            return mmu_l1;
        }

        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
            {
                *level = 2;
                return mmu_l2 + l2_off;
            }

            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
                                             PV_OFFSET);

            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                *level = 3;
                return mmu_l3 + l3_off;
            }
        }
    }

    return RT_NULL;
}

void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
{
    int level;
    uintptr_t *pte = _query(aspace, vaddr, &level);
    uintptr_t paddr;

    if (pte)
    {
        paddr = GET_PADDR(*pte);
        paddr |= ((intptr_t)vaddr & (_get_level_size(level) - 1));
    }
    else
    {
        LOG_I("%s: failed at %p", __func__, vaddr);
        paddr = (uintptr_t)ARCH_MAP_FAILED;
    }
    return (void *)paddr;
}
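
/*
 * Usage sketch (illustrative):
 *
 *   void *pa = rt_hw_mmu_v2p(&rt_kernel_space, some_va);
 *   if (pa == ARCH_MAP_FAILED)
 *   {
 *       // some_va is not mapped in this aspace
 *   }
 *
 * Since _query() reports the level of the matching leaf, the offset bits
 * inside a 2 MiB or 1 GiB superpage are carried over correctly.
 */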

static int _noncache(uintptr_t *pte)
{
    return 0;
}

static int _cache(uintptr_t *pte)
{
    return 0;
}

static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};

int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level;
    int err = -RT_EINVAL;
    void *vend = vaddr + size;

    int (*handler)(uintptr_t *pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        handler = control_handler[cmd];

        while (vaddr < vend)
        {
            uintptr_t *pte = _query(aspace, vaddr, &level);
            if (pte)
            {
                // a leaf was found; apply the control and skip its range
                void *range_end = vaddr + _get_level_size(level);
                RT_ASSERT(range_end <= vend);
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
                vaddr = range_end;
            }
            else
            {
                // no mapping here; level is unset, so advance by one page
                vaddr += ARCH_PAGE_SIZE;
            }
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}
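
/*
 * Usage sketch (illustrative): on this port _cache()/_noncache() are
 * stubs that return 0, so the call currently only walks and validates
 * the range:
 *
 *   int err = rt_hw_mmu_control(&rt_kernel_space, v_addr, size,
 *                               MMU_CNTL_NONCACHE);
 *   if (err == -RT_ENOSYS)
 *   {
 *       // cmd was outside [0, MMU_CNTL_DUMMY_END)
 *   }
 */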

/**
 * @brief Set up the page table for kernel space. The mapping is fixed:
 * none of these mappings can be changed after initialization.
 *
 * Memory regions in struct mem_desc must be page aligned;
 * otherwise the mapping fails and no error is reported.
 *
 * @param aspace   kernel address space
 * @param mdesc    array of memory region descriptors
 * @param desc_nr  number of descriptors
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = aspace->start,
                                     .limit_range_size = aspace->size,
                                     .map_size = mdesc->vaddr_end -
                                                 mdesc->vaddr_start + 1,
                                     .prefer = (void *)mdesc->vaddr_start};

        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        mdesc++;
    }

    _asid_init();

    rt_hw_aspace_switch(&rt_kernel_space);
    rt_page_cleanup();
}

void rt_hw_mmu_kernel_map_init(rt_aspace_t aspace, rt_size_t vaddr_start, rt_size_t size)
{
    rt_size_t paddr_start =
        __UMASKVALUE(VPN_TO_PPN(vaddr_start, PV_OFFSET), PAGE_OFFSET_MASK);
    rt_size_t va_s = GET_L1(vaddr_start);
    rt_size_t va_e = GET_L1(vaddr_start + size - 1);
    rt_size_t i;

    for (i = va_s; i <= va_e; i++)
    {
        MMUTable[i] =
            COMBINEPTE(paddr_start, PAGE_ATTR_RWX | PTE_G | PTE_V | PTE_CACHE |
                                        PTE_SHARE | PTE_BUF | PTE_A | PTE_D);
        paddr_start += L1_PAGE_SIZE;
    }

    rt_hw_tlb_invalidate_all_local();
}
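
/*
 * Example (illustrative, with a hypothetical kernel base): if vaddr_start
 * is the kernel image base and size is smaller than L1_PAGE_SIZE,
 * GET_L1() maps both ends of the range into the same slot, so a single
 * MMUTable[] entry (a global rwx 1 GiB megapage on Sv39) covers the whole
 * early-boot window; its PPN is the PV_OFFSET-adjusted physical base
 * computed above.
 */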