//
// kernel/riscv.h (xv6-riscv) — RISC-V privileged-register (CSR) access
// helpers and Sv39 page-table definitions.
//
#pragma once
#ifndef __ASSEMBLER__
#include "types.h"
2019-06-05 17:42:03 +02:00
// which hart (core) is this?
2024-05-24 11:26:40 +02:00
static inline u64
2019-06-05 17:42:03 +02:00
r_mhartid()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, mhartid" : "=r"(x));
2019-06-05 17:42:03 +02:00
return x;
}
// Machine Status Register, mstatus

/** mstatus.MPP field: the privilege mode to return to on mret. */
#define MSTATUS_MPP_MASK (3L << 11) // previous mode.
#define MSTATUS_MPP_M (3L << 11)    // previous mode = machine
#define MSTATUS_MPP_S (1L << 11)    // previous mode = supervisor
#define MSTATUS_MPP_U (0L << 11)    // previous mode = user
#define MSTATUS_MIE (1L << 3)       // machine-mode interrupt enable.
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_mstatus()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, mstatus" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mstatus(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mstatus, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
// machine exception program counter, holds the
// instruction address to which a return from
// exception will go.
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mepc(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mepc, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
// Supervisor Status Register, sstatus

/** Supervisor Previous Privilege */
#define SSTATUS_SPP (1L << 8) // Previous mode, 1=Supervisor, 0=User
/** Supervisor Previous Interrupt Enable */
#define SSTATUS_SPIE (1L << 5)
/** User Previous Interrupt Enable */
#define SSTATUS_UPIE (1L << 4)
/** Supervisor Interrupt Enable */
#define SSTATUS_SIE (1L << 1)
/** User Interrupt Enable */
#define SSTATUS_UIE (1L << 0)
/**
* Read the value of the sstatus register.
* (Supervisor Status Register)
*/
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_sstatus()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, sstatus" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/**
* Write a value to the sstatus register.
* (Supervisor Status Register)
*/
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_sstatus(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw sstatus, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/** Read Suporvisor Interrupt Pending */
2024-05-24 11:26:40 +02:00
static inline u64
r_sip()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, sip" : "=r"(x));
return x;
}
/** Write Suporvisor Interrupt Pending */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_sip(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw sip, %0" : : "r"(x));
}
/** Supervisor External Interrupt Enable (sie bit 9) */
#define SIE_SEIE (1L << 9)
/** Supervisor Timer Interrupt Enable (sie bit 5) */
#define SIE_STIE (1L << 5)
/** Supervisor Software Interrupt Enable (sie bit 1) */
#define SIE_SSIE (1L << 1)
/**
* Read the value of the sie register.
* (Supervisor Interrupt Enable)
*/
2024-05-24 11:26:40 +02:00
static inline u64
r_sie()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, sie" : "=r"(x));
return x;
}
/**
* Write the valie to the sie rgister
* (Supervisor Interrupt Enable)
*/
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_sie(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw sie, %0" : : "r"(x));
}
/** Machine External Interrupt Enable (mie bit 11) */
#define MIE_MEIE (1L << 11)
/** Machine Timer Interrupt Enable (mie bit 7) */
#define MIE_MTIE (1L << 7)
/** Machine Software Interrupt Enable (mie bit 3) */
#define MIE_MSIE (1L << 3)
/**
* Read the value of the mie register.
* (Machine Interrupt Enable)
*/
2024-05-24 11:26:40 +02:00
static inline u64
r_mie()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, mie" : "=r"(x));
return x;
}
/**
* Write the value to the mie register.
* (Machine Interrupt Enable)
*/
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mie(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mie, %0" : : "r"(x));
}
2021-05-16 20:56:46 +02:00
// supervisor exception program counter, holds the
2019-05-31 15:45:59 +02:00
// instruction address to which a return from
// exception will go.
/** Write Supervisor Exception Program Counter */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_sepc(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw sepc, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/** Read Supervisor Exception Program Counter */
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_sepc()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, sepc" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/** Read Machine Exception Delegation */
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_medeleg()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, medeleg" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/** Write Machine Exception Delegation */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_medeleg(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw medeleg, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/** Read Machine Interrupt Delegation */
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_mideleg()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, mideleg" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/** Write Machine Interrupt Delegation */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mideleg(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mideleg, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/** Write Supervisor Trap-Vector Base Address */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_stvec(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw stvec, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/** Read Supervisor Trap-Vector Base Address */
2024-05-24 11:26:40 +02:00
static inline u64
r_stvec()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, stvec" : "=r"(x));
return x;
}
/** Write Machine Trap-Vector Base Address */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mtvec(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mtvec, %0" : : "r"(x));
}
/** Read Physical Memory Protection Configuration */
2021-08-30 22:27:52 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_pmpcfg0(u64 x)
2021-08-30 22:27:52 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw pmpcfg0, %0" : : "r"(x));
2021-08-30 22:27:52 +02:00
}
/** Write Physical Memory Protection Configuration */
2021-08-30 22:27:52 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_pmpaddr0(u64 x)
2021-08-30 22:27:52 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw pmpaddr0, %0" : : "r"(x));
2021-08-30 22:27:52 +02:00
}
/** Risc-v's sv39 page table scheme: MODE field value in satp. */
#define SATP_SV39 (8L << 60)
/** Build a satp value: Sv39 mode plus the page table's physical page number. */
#define MAKE_SATP(pagetable) (SATP_SV39 | (((u64)pagetable) >> 12))
/**
* Write the value to the satp register.
* (Supervisor Address Translation and Protection)
*
* This register holds the address of the page table.
*/
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_satp(u64 x)
2019-05-31 15:45:59 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw satp, %0" : : "r"(x));
2019-05-31 15:45:59 +02:00
}
/**
* Read the value of the satp register.
* (Supervisor Address Translation and Protection)
* Returns the address of the page table.
*/
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_satp()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, satp" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/** Read Supervisor Scratch Register */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mscratch(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mscratch, %0" : : "r"(x));
}
/** Supervisor Trap Cause */
2024-05-24 11:26:40 +02:00
static inline u64
2019-05-31 15:45:59 +02:00
r_scause()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, scause" : "=r"(x));
2019-05-31 15:45:59 +02:00
return x;
}
/** Supervisor Trap Value */
2024-05-24 11:26:40 +02:00
static inline u64
r_stval()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, stval" : "=r"(x));
return x;
}
/** Write Machine-mode Counter-Enable Register */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_mcounteren(u64 x)
{
2024-06-15 16:55:06 +02:00
asm volatile("csrw mcounteren, %0" : : "r"(x));
}
/** Read Machine-mode Counter-Enable Register */
2024-05-24 11:26:40 +02:00
static inline u64
r_mcounteren()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, mcounteren" : "=r"(x));
return x;
}
/**
* Machine-mode cycle counter
* Reports the current wall-clock time from the timer device.
*/
2024-05-24 11:26:40 +02:00
static inline u64
r_time()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("csrr %0, time" : "=r"(x));
return x;
}
/** Enable device interrupts */
static inline void
intr_on()
{
w_sstatus(r_sstatus() | SSTATUS_SIE);
}
/** Disable device interrupts */
static inline void
intr_off()
{
w_sstatus(r_sstatus() & ~SSTATUS_SIE);
}
/** Are device interrupts enabled? */
static inline int
intr_get()
{
2024-05-24 11:26:40 +02:00
u64 x = r_sstatus();
return (x & SSTATUS_SIE) != 0;
}
/** Read stack pointer */
2024-05-24 11:26:40 +02:00
static inline u64
r_sp()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("mv %0, sp" : "=r"(x));
return x;
}
2022-08-09 20:17:46 +02:00
// read and write tp, the thread pointer, which xv6 uses to hold
2019-06-05 17:42:03 +02:00
// this core's hartid (core number), the index into cpus[].
/** Read thread pointer */
2024-05-24 11:26:40 +02:00
static inline u64
2019-06-05 17:42:03 +02:00
r_tp()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("mv %0, tp" : "=r"(x));
2019-06-05 17:42:03 +02:00
return x;
}
/** Write thread pointer */
2024-06-15 16:55:06 +02:00
static inline void
2024-05-24 11:26:40 +02:00
w_tp(u64 x)
2019-06-05 17:42:03 +02:00
{
2024-06-15 16:55:06 +02:00
asm volatile("mv tp, %0" : : "r"(x));
2019-06-05 17:42:03 +02:00
}
/** Read the return address */
2024-05-24 11:26:40 +02:00
static inline u64
r_ra()
{
2024-05-24 11:26:40 +02:00
u64 x;
2024-06-15 16:55:06 +02:00
asm volatile("mv %0, ra" : "=r"(x));
return x;
}
/**
 * Flush the TLB (Translation Lookaside Buffer).
 * Must be executed after changing satp or modifying page tables so
 * stale translations are discarded.
 */
static inline void
sfence_vma()
{
// the zero, zero means flush all TLB entries.
asm volatile("sfence.vma zero, zero");
}
/** Page Table Entry Type */
typedef u64 pte_t;
/** Page Table Type: an array of 512 PTEs filling one 4096-byte page. */
typedef u64 *pagetable_t; // 512 PTEs
#endif // __ASSEMBLER__
/** Page Size */
#define PGSIZE 4096 // bytes per page
/** Page Shift, bits of offset within a page */
#define PGSHIFT 12

/** Round sz up to the next page boundary. */
#define PGROUNDUP(sz) (((sz) + PGSIZE - 1) & ~(PGSIZE - 1))
/** Round a down to the start of its page. */
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE - 1))
/**
 * Page Table Entry Flags
 * (held in the low bits of each PTE; see PTE_FLAGS below)
 */
#define PTE_V (1L << 0) /** PTE Valid */
#define PTE_R (1L << 1) /** PTE Readable */
#define PTE_W (1L << 2) /** PTE Writeable */
#define PTE_X (1L << 3) /** PTE Executable */
#define PTE_U (1L << 4) /** PTE User Accessible */
/**
 * Helper macros to shift a physical address
 * to the right place for a PTE.
 */
/** Physical Address to Page Table Entry: PPN moves from bit 12 to bit 10. */
#define PA2PTE(pa) ((((u64)pa) >> 12) << 10)
/** Page Table Entry to Physical Address: inverse of PA2PTE. */
#define PTE2PA(pte) (((pte) >> 10) << 12)
/** Page Table Entry Flags: the low 10 bits of a PTE. */
#define PTE_FLAGS(pte) ((pte) & 0x3FF)
/**
 * Helper macros to extract the three 9-bit
 * page table indices from a virtual address.
 */
/** Page Extract Mask */
#define PXMASK 0x1FF // 9 bits, 0b111111111
/** Page Extract Shift: bit position of the level-th 9-bit index in a VA. */
#define PXSHIFT(level) (PGSHIFT + (9 * (level)))
/** Page Extract: the page-table index for `level` (0..2) of va. */
#define PX(level, va) ((((u64)(va)) >> PXSHIFT(level)) & PXMASK)
/**
 * One beyond the highest possible virtual address.
 * MAXVA is actually one bit less than the max allowed by
 * Sv39, to avoid having to sign-extend virtual addresses
 * that have the high bit set.
 */
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))