 97  typedef enum page_prot {
113  typedef enum page_cache {
127  typedef enum page_size {
143  typedef struct mmupage {
164  #define MMU_SUB_PAGES 512
174  typedef struct mmusubcontext {
179  #define MMU_PAGES 1024
189  typedef struct mmucontext {
287                    bool share,
                       bool dirty);
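Taken together, the declarations above describe a two-level table: a context holds MMU_PAGES sub-contexts, and each sub-context holds MMU_SUB_PAGES page entries. A minimal, self-contained sketch of how a virtual address would split across those levels, assuming 4 KB base pages; the 4 KB figure and the helper name are illustrative, not taken from mmu.h:

/* Sketch: decomposing a virtual address into the two-level table implied by
 * MMU_PAGES (mmu.h:179) and MMU_SUB_PAGES (mmu.h:164).  4 KB base pages are
 * an assumption made for illustration. */
#include <stdint.h>
#include <stdio.h>

#define MMU_SUB_PAGES 512   /* pages per sub-context */
#define MMU_PAGES     1024  /* sub-contexts per context */

static void decompose(uint32_t vaddr) {
    uint32_t virtpage = vaddr >> 12;              /* 4 KB base pages (assumed) */
    uint32_t top      = virtpage / MMU_SUB_PAGES; /* index into the MMU_PAGES array */
    uint32_t sub      = virtpage % MMU_SUB_PAGES; /* entry within the sub-context */

    printf("vaddr %08lx -> virtpage %lu (sub-context %lu, entry %lu)\n",
           (unsigned long)vaddr, (unsigned long)virtpage,
           (unsigned long)top, (unsigned long)sub);
}

int main(void) {
    decompose(0x00200000);  /* 1024 * 512 * 4 KB = 2 GB of coverage in total */
    return 0;
}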
page_cache_t
Definition mmu.h:113
@ MMU_CACHE_BACK
Write-back caching.
Definition mmu.h:115
@ MMU_CACHE_WT
Write-through caching.
Definition mmu.h:116
@ MMU_CACHEABLE
Default caching.
Definition mmu.h:117
@ MMU_NO_CACHE
Cache disabled.
Definition mmu.h:114
page_size_t
Definition mmu.h:127
@ PAGE_SIZE_1K
Definition mmu.h:128
@ PAGE_SIZE_4K
Definition mmu.h:129
@ PAGE_SIZE_64K
Definition mmu.h:130
@ PAGE_SIZE_1M
Definition mmu.h:131
page_prot_t
Definition mmu.h:97
@ MMU_ALL_RDONLY
Read-only user and kernel.
Definition mmu.h:100
@ MMU_KERNEL_RDWR
No user access, kernel full.
Definition mmu.h:99
@ MMU_ALL_RDWR
Full access, user and kernel.
Definition mmu.h:101
@ MMU_KERNEL_RDONLY
No user access, kernel read-only.
Definition mmu.h:98
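The protection and cache enumerations above are the attribute arguments passed to mmu_page_map(), documented further down. A minimal sketch of two attribute combinations, assuming a KOS/Dreamcast build where this header is available as <arch/mmu.h>; the ASID and page IDs are arbitrary illustrative values:

/* Sketch: choosing protection and cache attributes when mapping pages. */
#include <arch/mmu.h>
#include <stdbool.h>

void map_examples(void) {
    mmucontext_t *ctx = mmu_context_create(1);   /* ASID 1 (arbitrary) */

    /* Ordinary RAM page: full user/kernel access, write-back cached. */
    mmu_page_map(ctx, /* virtpage */ 0x100, /* physpage */ 0x2000,
                 /* count */ 1, MMU_ALL_RDWR, MMU_CACHE_BACK,
                 /* share */ false, /* dirty */ true);

    /* Device-style page: kernel-only access, cache disabled. */
    mmu_page_map(ctx, 0x101, 0x2001, 1, MMU_KERNEL_RDWR, MMU_NO_CACHE,
                 false, true);

    mmu_context_destroy(ctx);
}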
void mmu_context_destroy(mmucontext_t *context)
Destroy an MMU context when a process is being destroyed.
mmu_mapfunc_t mmu_map_set_callback(mmu_mapfunc_t newfunc)
Set a new MMU mapping handler.
#define MMU_SUB_PAGES
The number of pages in a sub-context.
Definition mmu.h:164
int mmu_copyin(mmucontext_t *context, uint32_t srcaddr, uint32_t srccnt, void *buffer)
Copy a chunk of data from a process' address space into a kernel buffer, taking into account page mappings.
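A small usage sketch for mmu_copyin(), assuming <arch/mmu.h> and a context obtained elsewhere; the wrapper name is hypothetical, and the return value is passed straight through since the brief above does not state its convention:

/* Sketch: pulling data out of a process' address space with mmu_copyin(). */
#include <arch/mmu.h>
#include <stdint.h>

int read_user_block(mmucontext_t *proc_ctx, uint32_t user_addr,
                    void *kbuf, uint32_t len) {
    /* Copies len bytes starting at user_addr in proc_ctx's address space,
     * honouring that context's page mappings. */
    return mmu_copyin(proc_ctx, user_addr, len, kbuf);
}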
void mmu_switch_context(mmucontext_t *context)
Switch to the given context.
int mmu_phys_to_virt(mmucontext_t *context, int physpage)
Using the given page tables, translate the physical page ID to a virtual page ID.
mmupage_t *(* mmu_mapfunc_t)(mmucontext_t *context, int virtpage)
MMU mapping handler.
Definition mmu.h:329
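A custom mmu_mapfunc_t can wrap the existing handler; the sketch below assumes the mmu_mapfunc_t returned by mmu_map_set_callback() is the previously installed handler, and that the header is available as <arch/mmu.h>. The logging is purely illustrative:

/* Sketch: wrapping the MMU mapping handler and chaining to the old one. */
#include <arch/mmu.h>
#include <stdio.h>

static mmu_mapfunc_t old_map_func;

static mmupage_t *logging_map_func(mmucontext_t *context, int virtpage) {
    /* Illustration only: a real handler on the TLB miss path would avoid
     * console I/O. */
    printf("TLB lookup: asid %d, virtpage %d\n", context->asid, virtpage);
    return old_map_func(context, virtpage);   /* defer to the original handler */
}

void install_logging_handler(void) {
    old_map_func = mmu_map_set_callback(logging_map_func);
}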
void mmu_init_basic(void)
Initialize basic MMU support.
mmucontext_t * mmu_context_create(int asid)
Allocate a new MMU context.
void mmu_page_map(mmucontext_t *context, int virtpage, int physpage, int count, page_prot_t prot, page_cache_t cache, bool share, bool dirty)
Set the given virtual page to map to the given physical page.
void mmu_use_table(mmucontext_t *context)
Set the "current" page tables for TLB handling.
void mmu_shutdown_basic(void)
Shutdown basic MMU support.
int mmu_virt_to_phys(mmucontext_t *context, int virtpage)
Using the given page tables, translate the virtual page ID to a physical page ID.
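A sketch around the translation helper above; treating a negative return as "no mapping" is an assumption, since the brief does not state a failure convention:

/* Sketch: translating a virtual page ID through a context's tables. */
#include <arch/mmu.h>
#include <stdio.h>

void show_translation(mmucontext_t *ctx, int virtpage) {
    int physpage = mmu_virt_to_phys(ctx, virtpage);

    if(physpage < 0)                 /* assumed "no mapping" convention */
        printf("virtpage %d is not mapped\n", virtpage);
    else
        printf("virtpage %d -> physpage %d\n", virtpage, physpage);
}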
void mmu_shutdown(void)
Shutdown MMU support.
int mmu_page_map_static(uintptr_t virt, uintptr_t phys, page_size_t page_size, page_prot_t page_prot, bool cached)
Create a static virtual memory mapping.
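A sketch of a static mapping with an explicit page size, assuming <arch/mmu.h>; the virtual and physical addresses are invented for illustration:

/* Sketch: one static 64 KB, kernel-read/write, cached mapping. */
#include <arch/mmu.h>
#include <stdbool.h>
#include <stdint.h>

int map_static_region(void) {
    return mmu_page_map_static((uintptr_t)0xc2000000, (uintptr_t)0x0c200000,
                               PAGE_SIZE_64K, MMU_KERNEL_RDWR, true);
}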
void mmu_set_sq_addr(void *addr)
Reset the base target address for store queues.
void mmu_reset_itlb(void)
Reset ITLB.
void mmu_init(void)
Initialize MMU support.
int mmu_copyv(mmucontext_t *context1, struct iovec *iov1, int iovcnt1, mmucontext_t *context2, struct iovec *iov2, int iovcnt2)
Copy a chunk of data from one process' address space to another process' address space, taking into account page mappings.
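A sketch of a single-segment copy between two address spaces with mmu_copyv(); struct iovec is the uio.h structure cross-referenced below. Treating context1/iov1 as the source and context2/iov2 as the destination is an assumption here, as is the error convention:

/* Sketch: scatter/gather copy between two address spaces. */
#include <arch/mmu.h>
#include <sys/uio.h>
#include <stddef.h>

int copy_between(mmucontext_t *src_ctx, void *src_addr, size_t src_len,
                 mmucontext_t *dst_ctx, void *dst_addr, size_t dst_len) {
    /* iov_base values are addresses in the respective context's space. */
    struct iovec src = { .iov_base = src_addr, .iov_len = src_len };
    struct iovec dst = { .iov_base = dst_addr, .iov_len = dst_len };

    /* Single-element vectors here; more elements allow scatter/gather. */
    return mmu_copyv(src_ctx, &src, 1, dst_ctx, &dst, 1);
}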
mmu_mapfunc_t mmu_map_get_callback(void)
Get the current mapping function.
bool mmu_enabled(void)
Check if MMU translation is enabled.
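A sketch pairing initialization and shutdown, with mmu_enabled() as a sanity check; whether a program wants mmu_init()/mmu_shutdown() or the _basic variants depends on how much of the translation machinery it needs:

/* Sketch: symmetric MMU bring-up and tear-down. */
#include <arch/mmu.h>
#include <stdio.h>

void mmu_demo(void) {
    mmu_init();                     /* full MMU support */

    if(mmu_enabled())
        printf("MMU translation is active\n");

    /* ... use translated mappings ... */

    mmu_shutdown();                 /* matches mmu_init() */
}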
#define MMU_PAGES
The number of sub-contexts in the main level context.
Definition mmu.h:179
I/O vector structure.
Definition uio.h:34
MMU context type.
Definition mmu.h:189
int asid
Address Space ID.
Definition mmu.h:191
MMU TLB entry for a single page.
Definition mmu.h:143
uint32_t shared
Shared between procs – 1 bit.
Definition mmu.h:149
uint32_t valid
Valid mapping – 1 bit.
Definition mmu.h:148
uint32_t dirty
Dirty – 1 bit.
Definition mmu.h:151
uint32_t ptel
Pre-built PTEL value.
Definition mmu.h:158
uint32_t cache
Cacheable – 1 bit.
Definition mmu.h:150
uint32_t blank
Reserved – 7 bits.
Definition mmu.h:153
uint32_t physical
Physical page ID – 18 bits.
Definition mmu.h:146
uint32_t wthru
Write-thru enable – 1 bit.
Definition mmu.h:152
uint32_t pteh
Pre-built PTEH value.
Definition mmu.h:157
uint32_t prkey
Protection key data – 2 bits.
Definition mmu.h:147
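The bitfields above can be inspected by looking an entry up through the current mapping handler. A sketch assuming <arch/mmu.h>; whether an unmapped page comes back as NULL or as an entry with valid == 0 is left open, so both are checked:

/* Sketch: dumping the mmupage_t entry for one virtual page. */
#include <arch/mmu.h>
#include <stdio.h>

void dump_page_entry(mmucontext_t *ctx, int virtpage) {
    mmu_mapfunc_t lookup = mmu_map_get_callback();
    mmupage_t *page = lookup(ctx, virtpage);

    if(!page || !page->valid) {
        printf("virtpage %d: no valid mapping\n", virtpage);
        return;
    }

    printf("virtpage %d -> physpage %u (prkey %u, cache %u, wthru %u, "
           "dirty %u, shared %u)\n",
           virtpage, (unsigned)page->physical, (unsigned)page->prkey,
           (unsigned)page->cache, (unsigned)page->wthru,
           (unsigned)page->dirty, (unsigned)page->shared);
}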
MMU sub-context type.
Definition mmu.h:174