The document describes the implementation of paging-based memory management in C. It includes functions for allocating and freeing memory regions, mapping virtual to physical addresses using page tables, and reading and writing values to memory addresses.


//#ifdef MM_PAGING

/*
* PAGING based Memory Management
* Virtual memory module mm/mm-vm.c
*/

#include "string.h"
#include "mm.h"
#include <stdlib.h>
#include <stdio.h>

/*enlist_vm_freerg_list - add new rg to freerg_list
 *@mm: memory region
 *@rg_elmt: new region
 *
 */
int enlist_vm_freerg_list(struct mm_struct *mm, struct vm_rg_struct rg_elmt)
{
  struct vm_rg_struct *rg_node = mm->mmap->vm_freerg_list;

  if (rg_elmt.rg_start >= rg_elmt.rg_end)
    return -1;

  /* rg_elmt is passed by value, so copy it into a heap node instead of
   * storing the address of a stack parameter (which would dangle) */
  struct vm_rg_struct *new_node = malloc(sizeof(struct vm_rg_struct));
  if (new_node == NULL)
    return -1;

  *new_node = rg_elmt;
  new_node->rg_next = rg_node; /* rg_node may be NULL for an empty list */

  /* Enlist the new region at the head of the free list */
  mm->mmap->vm_freerg_list = new_node;

  return 0;
}

/*get_vma_by_num - get vm area by numID
 *@mm: memory region
 *@vmaid: ID vm area to alloc memory region
 *
 */
struct vm_area_struct *get_vma_by_num(struct mm_struct *mm, int vmaid)
{
  struct vm_area_struct *pvma = mm->mmap;

  if (mm->mmap == NULL)
    return NULL;

  int vmait = 0;

  /* Walk the vm area list until the requested ID is reached */
  while (vmait < vmaid)
  {
    if (pvma == NULL)
      return NULL;

    pvma = pvma->vm_next;
    vmait++;
  }

  return pvma;
}

/*get_symrg_byid - get mem region by region ID
 *@mm: memory region
 *@rgid: region ID acting as symbol index of variable
 *
 */
struct vm_rg_struct *get_symrg_byid(struct mm_struct *mm, int rgid)
{
  if (rgid < 0 || rgid >= PAGING_MAX_SYMTBL_SZ)
    return NULL;

  return &mm->symrgtbl[rgid];
}

/*__alloc - allocate a region memory
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@rgid: memory region ID (used to identify variable in symbol table)
 *@size: allocated size
 *@alloc_addr: address of allocated memory region
 *
 */
int __alloc(struct pcb_t *caller, int vmaid, int rgid, int size, int *alloc_addr)
{
  /* Allocate at the top of the vm area */
  struct vm_rg_struct rgnode;
  int old_sbrk;

  if (get_free_vmrg_area(caller, vmaid, size, &rgnode) != 0)
  {
    /* No free region fits: lift up sbrk and add pages if needed */
    struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);
    int inc_sz = PAGING_PAGE_ALIGNSZ(size);
    int incnumpage = inc_sz / PAGING_PAGESZ;

    /* Store old sbrk: the new region starts here */
    old_sbrk = cur_vma->sbrk;

    /* Increase the vm area limit to get space */
    if (inc_vma_limit(caller, vmaid, inc_sz) != 0)
      return -1;

    /* Move sbrk past the newly mapped pages */
    cur_vma->sbrk += incnumpage * PAGING_PAGESZ;
  }
  else
  {
    /* A free region was found: reuse its start address */
    old_sbrk = rgnode.rg_start;
  }

  /* Record the region in the symbol region table */
  caller->mm->symrgtbl[rgid].rg_start = old_sbrk;
  caller->mm->symrgtbl[rgid].rg_end = old_sbrk + size;

  *alloc_addr = old_sbrk;

  return 0;
}
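The allocation path above rounds the requested size up to whole pages before raising sbrk. Below is a minimal, self-contained sketch of that rounding, assuming a hypothetical 256-byte page and that PAGING_PAGE_ALIGNSZ is a round-up-to-page-multiple macro (the real constants live in mm.h):

#include <assert.h>
#include <stdio.h>

#define TOY_PAGESZ 256                      /* hypothetical page size */
#define TOY_ALIGNSZ(sz) (((sz) + TOY_PAGESZ - 1) / TOY_PAGESZ * TOY_PAGESZ)

int main(void)
{
    int size = 300;                         /* requested allocation size */
    int inc_sz = TOY_ALIGNSZ(size);         /* rounded up to 512 */
    int incnumpage = inc_sz / TOY_PAGESZ;   /* 2 pages */

    assert(inc_sz == 512 && incnumpage == 2);
    printf("size=%d -> inc_sz=%d, pages=%d\n", size, inc_sz, incnumpage);
    return 0;
}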
/*__free - remove a region memory
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@rgid: memory region ID (used to identify variable in symbol table)
 *
 */
int __free(struct pcb_t *caller, int vmaid, int rgid) {
  struct mm_struct *mm = caller->mm;

  if (rgid < 0 || rgid >= PAGING_MAX_SYMTBL_SZ)
    return -1;

  // Get the vm_area_struct corresponding to vmaid
  struct vm_area_struct *vma = get_vma_by_num(mm, vmaid);
  if (vma == NULL) {
    // VM area not found
    return -1;
  }

  // Get the vm_rg_struct corresponding to rgid
  struct vm_rg_struct *rg = get_symrg_byid(mm, rgid);
  if (rg == NULL) {
    // Symbol region not found
    return -1;
  }

  // Enlist the obsoleted memory region to freerg_list
  enlist_vm_rg_node(&(vma->vm_freerg_list), rg);

  // Release the physical frames backing the region's pages
  int size = rg->rg_end - rg->rg_start;
  int numpages = PAGING_PAGE_ALIGNSZ(size) / PAGING_PAGESZ;
  int pgn = PAGING_PGN(rg->rg_start);
  for (int i = 0; i < numpages; i++) {
    uint32_t pte = caller->mm->pgd[pgn + i];
    if (PAGING_PAGE_PRESENT(pte)) {
      int fpn = PAGING_FPN(pte); // frame number comes from the PTE, not the virtual address
      MEMPHY_put_freefp(caller->mram, fpn);
    }
  }
  return 0;
}
/*pgalloc - PAGING-based allocate a region memory
 *@proc: Process executing the instruction
 *@size: allocated size
 *@reg_index: memory region ID (used to identify variable in symbol table)
 */
int pgalloc(struct pcb_t *proc, uint32_t size, uint32_t reg_index)
{
  int addr;

  /* By default using vmaid = 0 */
  return __alloc(proc, 0, reg_index, size, &addr);
}

/*pgfree - PAGING-based free a region memory
 *@proc: Process executing the instruction
 *@reg_index: memory region ID (used to identify variable in symbol table)
 */

int pgfree_data(struct pcb_t *proc, uint32_t reg_index)
{
  return __free(proc, 0, reg_index);
}

/*pg_getpage - get the page in ram
 *@mm: memory region
 *@pagenum: PGN
 *@framenum: return FPN
 *@caller: caller
 *
 */
int pg_getpage(struct mm_struct* mm, int pgn, int* fpn, struct pcb_t* caller) {
  uint32_t pte = mm->pgd[pgn];

  if (!PAGING_PAGE_PRESENT(pte)) {
    /* Page is not online, make it actively living */
    int vicpgn, swpfpn;
    int tgtfpn = PAGING_SWP(pte); // The swap slot storing our page

    /* Find victim page */
    if (find_victim_page(caller->mm, &vicpgn) != 0)
      return -1;
    int vicfpn = PAGING_FPN(mm->pgd[vicpgn]);

    /* Get free frame in MEMSWP */
    if (MEMPHY_get_freefp(caller->active_mswp, &swpfpn) != 0)
      return -1;

    /* Copy victim frame to swap */
    __swap_cp_page(caller->mram, vicfpn, caller->active_mswp, swpfpn);

    /* Copy target page from swap into the frame freed by the victim */
    __swap_cp_page(caller->active_mswp, tgtfpn, caller->mram, vicfpn);

    /* Update page table: the victim now lives in swap slot swpfpn */
    pte_set_swap(&mm->pgd[vicpgn], 1, swpfpn);

    /* Update the online status of the target page: it now lives in vicfpn */
    pte_set_fpn(&mm->pgd[pgn], vicfpn);
    pte = mm->pgd[pgn];

    enlist_pgn_node(&caller->mm->fifo_pgn, pgn);
  }

  *fpn = PAGING_FPN(pte);

  return 0;
}
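The page-fault path above swaps a victim out and the target in, reusing the victim's RAM frame. The following toy program is a self-contained sketch of just that copy sequence, using plain byte arrays in place of MEMPHY and the swap device (frame and slot numbers are made up for illustration):

#include <stdio.h>
#include <string.h>

#define TOY_FRAMES 2   /* toy RAM with two frames    */
#define TOY_FRSZ   4   /* four bytes per frame/slot  */

int main(void)
{
    char ram[TOY_FRAMES][TOY_FRSZ]  = { "AAA", "BBB" };  /* resident pages        */
    char swap[TOY_FRAMES][TOY_FRSZ] = { "CCC", "" };     /* swapped-out page "CCC" */

    int vic_frame = 0;   /* frame chosen by the victim policy        */
    int swp_free  = 1;   /* free slot obtained from the swap device  */
    int tgt_slot  = 0;   /* swap slot holding the faulting page      */

    /* 1. copy the victim frame out to the free swap slot */
    memcpy(swap[swp_free], ram[vic_frame], TOY_FRSZ);
    /* 2. bring the target page into the freed RAM frame  */
    memcpy(ram[vic_frame], swap[tgt_slot], TOY_FRSZ);
    /* 3. in the real code the PTEs are then updated: the victim's PTE
     *    points to the swap slot, the target's PTE to the RAM frame   */

    printf("ram[0]=%s ram[1]=%s swap[1]=%s\n", ram[0], ram[1], swap[1]);
    return 0;
}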

/*pg_getval - read value at given offset
 *@mm: memory region
 *@addr: virtual address to access
 *@value: value
 *
 */
int pg_getval(struct mm_struct *mm, int addr, BYTE *data, struct pcb_t *caller)
{
  int pgn = PAGING_PGN(addr);
  int off = PAGING_OFFST(addr);
  int fpn;

  /* Get the page to MEMRAM, swap from MEMSWAP if needed */
  if (pg_getpage(mm, pgn, &fpn, caller) != 0)
    return -1; /* invalid page access */

  int phyaddr = (fpn << PAGING_ADDR_FPN_LOBIT) + off;

  MEMPHY_read(caller->mram, phyaddr, data);

  return 0;
}
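For intuition, here is a minimal, self-contained sketch of the virtual-to-physical translation that pg_getval and pg_setval perform, using a hypothetical 8-bit offset (256-byte pages); the real bit layout is defined by PAGING_PGN, PAGING_OFFST and PAGING_ADDR_FPN_LOBIT in mm.h:

#include <assert.h>
#include <stdio.h>

#define TOY_OFF_BITS 8                                  /* hypothetical: 256-byte pages */
#define TOY_PGN(x)   ((x) >> TOY_OFF_BITS)              /* page number = high bits      */
#define TOY_OFF(x)   ((x) & ((1 << TOY_OFF_BITS) - 1))  /* offset = low bits            */

int main(void)
{
    int addr = 0x0342;                         /* some virtual address           */
    int pgn = TOY_PGN(addr);                   /* 0x03                           */
    int off = TOY_OFF(addr);                   /* 0x42                           */

    int fpn = 7;                               /* frame returned by pg_getpage() */
    int phyaddr = (fpn << TOY_OFF_BITS) + off; /* frame base + offset            */

    assert(pgn == 0x03 && off == 0x42 && phyaddr == 0x0742);
    printf("addr=0x%x -> pgn=%d off=0x%x -> phyaddr=0x%x\n", addr, pgn, off, phyaddr);
    return 0;
}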

/*pg_setval - write value to given offset
 *@mm: memory region
 *@addr: virtual address to access
 *@value: value
 *
 */
int pg_setval(struct mm_struct *mm, int addr, BYTE value, struct pcb_t *caller)
{
  int pgn = PAGING_PGN(addr);
  int off = PAGING_OFFST(addr);
  int fpn;

  /* Get the page to MEMRAM, swap from MEMSWAP if needed */
  if (pg_getpage(mm, pgn, &fpn, caller) != 0)
    return -1; /* invalid page access */

  int phyaddr = (fpn << PAGING_ADDR_FPN_LOBIT) + off;

  MEMPHY_write(caller->mram, phyaddr, value);

  return 0;
}

/*__read - read value in region memory
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@offset: offset to access in memory region
 *@rgid: memory region ID (used to identify variable in symbol table)
 *
 */
int __read(struct pcb_t *caller, int vmaid, int rgid, int offset, BYTE *data)
{
  struct vm_rg_struct *currg = get_symrg_byid(caller->mm, rgid);

  struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);

  if (currg == NULL || cur_vma == NULL) /* Invalid memory identify */
    return -1;

  /* Propagate read failures (e.g. invalid page access) to the caller */
  return pg_getval(caller->mm, currg->rg_start + offset, data, caller);
}

/*pgread - PAGING-based read a region memory */
int pgread(
    struct pcb_t * proc, // Process executing the instruction
    uint32_t source,     // Index of source register
    uint32_t offset,     // Source address = [source] + [offset]
    uint32_t destination)
{
  BYTE data;
  int val = __read(proc, 0, source, offset, &data);

  /* NOTE: destination is passed by value, so this assignment does not reach
   * the process's register file; the read value is surfaced via IODUMP below */
  destination = (uint32_t) data;

#ifdef IODUMP
  printf("read region=%d offset=%d value=%d\n", source, offset, data);
#ifdef PAGETBL_DUMP
  print_pgtbl(proc, 0, -1); // print max TBL
#endif
  MEMPHY_dump(proc->mram);
#endif

  return val;
}

/*__write - write a region memory
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@offset: offset to access in memory region
 *@rgid: memory region ID (used to identify variable in symbol table)
 *
 */
int __write(struct pcb_t *caller, int vmaid, int rgid, int offset, BYTE value)
{
  struct vm_rg_struct *currg = get_symrg_byid(caller->mm, rgid);

  struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);

  if (currg == NULL || cur_vma == NULL) /* Invalid memory identify */
    return -1;

  /* Propagate write failures (e.g. invalid page access) to the caller */
  return pg_setval(caller->mm, currg->rg_start + offset, value, caller);
}

/*pgwrite - PAGING-based write a region memory */
int pgwrite(
    struct pcb_t * proc,  // Process executing the instruction
    BYTE data,            // Data to be written into memory
    uint32_t destination, // Index of destination register
    uint32_t offset)
{
#ifdef IODUMP
  printf("write region=%d offset=%d value=%d\n", destination, offset, data);
#ifdef PAGETBL_DUMP
  print_pgtbl(proc, 0, -1); // print max TBL
#endif
  MEMPHY_dump(proc->mram);
#endif

  return __write(proc, 0, destination, offset, data);
}
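Taken together, pgalloc, pgwrite, pgread and pgfree_data form the paging interface the instruction dispatcher calls. The sketch below is a hypothetical driver, not framework code, and assumes a struct pcb_t named proc whose mm, mram and active_mswp fields were already initialized elsewhere; region index 1 is an arbitrary symbol-table slot:

/* Hypothetical driver, assuming `proc` is a pcb_t fully initialized by the
 * framework (mm, mram and active_mswp set up elsewhere) */
void demo_paging_calls(struct pcb_t *proc)
{
    pgalloc(proc, 100, 1);       /* reserve a 100-byte region in symbol slot 1 */
    pgwrite(proc, 0x2A, 1, 20);  /* write value 0x2A at offset 20 of region 1  */
    pgread(proc, 1, 20, 0);      /* read it back (destination register 0)      */
    pgfree_data(proc, 1);        /* return the region to the free list         */
}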

/*free_pcb_memphy - collect all memphy of pcb
 *@caller: caller
 */
int free_pcb_memph(struct pcb_t *caller)
{
  int pagenum, fpn;
  uint32_t pte;

  for (pagenum = 0; pagenum < PAGING_MAX_PGN; pagenum++)
  {
    pte = caller->mm->pgd[pagenum];

    if (PAGING_PAGE_PRESENT(pte))
    {
      /* Page resident in RAM: release its physical frame */
      fpn = PAGING_FPN(pte);
      MEMPHY_put_freefp(caller->mram, fpn);
    } else {
      /* Page swapped out: release its swap slot */
      fpn = PAGING_SWP(pte);
      MEMPHY_put_freefp(caller->active_mswp, fpn);
    }
  }

  return 0;
}

/*get_vm_area_node - get vm area for a number of pages
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@incpgnum: number of page
 *@vmastart: vma start
 *@vmaend: vma end
 *
 */
struct vm_rg_struct* get_vm_area_node_at_brk(struct pcb_t *caller, int vmaid, int size, int alignedsz)
{
  struct vm_rg_struct *newrg;
  struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);

  newrg = malloc(sizeof(struct vm_rg_struct));

  newrg->rg_start = cur_vma->sbrk;
  newrg->rg_end = newrg->rg_start + size;

  return newrg;
}
/*validate_overlap_vm_area
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@vmastart: vma start
 *@vmaend: vma end
 *
 */
int validate_overlap_vm_area(struct pcb_t *caller, int vmaid, int vmastart, int vmaend)
{
  struct vm_area_struct *vma = caller->mm->mmap;

  /* Validate that the planned memory area does not overlap another vm area */
  while (vma) {
    if (vma->vm_id != vmaid) {
      /* If the new region overlaps with an existing one, return error */
      if (vmaend > vma->vm_start && vmastart < vma->vm_end) {
        return -1;
      }
    }
    vma = vma->vm_next;
  }
  // No overlapping regions found
  return 0;
}
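The overlap test above treats each area as a half-open interval [vm_start, vm_end). A standalone sketch of the same predicate with a few spot checks (the function name here is illustrative, not from the framework):

#include <assert.h>

/* Two half-open intervals [s1, e1) and [s2, e2) overlap iff each one starts
 * before the other ends */
static int intervals_overlap(int s1, int e1, int s2, int e2)
{
    return e1 > s2 && s1 < e2;
}

int main(void)
{
    assert( intervals_overlap(0, 10, 5, 15));   /* partial overlap             */
    assert(!intervals_overlap(0, 10, 10, 20));  /* touching ends do not count  */
    assert(!intervals_overlap(0, 10, 20, 30));  /* disjoint                    */
    return 0;
}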

/*inc_vma_limit - increase vm area limits to reserve space for new variable
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@inc_sz: increment size
 *
 */
int inc_vma_limit(struct pcb_t *caller, int vmaid, int inc_sz)
{
  struct vm_rg_struct *newrg = malloc(sizeof(struct vm_rg_struct));
  int inc_amt = PAGING_PAGE_ALIGNSZ(inc_sz);
  int incnumpage = inc_amt / PAGING_PAGESZ;
  struct vm_rg_struct *area = get_vm_area_node_at_brk(caller, vmaid, inc_sz, inc_amt);
  struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);
  int old_end = cur_vma->vm_end;

  /* Validate overlap of obtained region */
  if (validate_overlap_vm_area(caller, vmaid, area->rg_start, area->rg_end) < 0)
    return -1; /* Overlap and failed allocation */

  /* The obtained vm area will now be backed by real RAM frames */
  cur_vma->vm_end += inc_sz;
  if (vm_map_ram(caller, area->rg_start, area->rg_end,
                 old_end, incnumpage, newrg) < 0)
    return -1; /* Map the memory to MEMRAM */

  return 0;
}

/*find_victim_page - find victim page
 *@mm: memory region
 *@retpgn: return page number
 *
 */
int find_victim_page(struct mm_struct *mm, int *retpgn)
{
  struct pgn_t *pg = mm->fifo_pgn;

  /* FIFO policy: evict the page at the head of the page list */
  if (pg != NULL) {
    *retpgn = pg->pgn;
    mm->fifo_pgn = pg->pg_next;
    free(pg);
    return 0;
  }

  /* Page not found */
  return -1;
}
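find_victim_page above pops the head of mm->fifo_pgn; for that to be true FIFO, new pages must be appended at the tail by enlist_pgn_node. Below is a self-contained sketch of that tail-append/head-pop discipline using a local node type rather than the framework's pgn_t:

#include <stdio.h>
#include <stdlib.h>

/* Local stand-in for the project's pgn_t list node */
struct toy_pgn { int pgn; struct toy_pgn *next; };

/* Append at the tail so the oldest page stays at the head (FIFO order) */
static void toy_enlist(struct toy_pgn **head, int pgn)
{
    struct toy_pgn *node = malloc(sizeof *node);
    node->pgn = pgn;
    node->next = NULL;
    while (*head != NULL)
        head = &(*head)->next;
    *head = node;
}

/* Pop the head: the page that has been resident the longest */
static int toy_victim(struct toy_pgn **head, int *retpgn)
{
    struct toy_pgn *pg = *head;
    if (pg == NULL)
        return -1;
    *retpgn = pg->pgn;
    *head = pg->next;
    free(pg);
    return 0;
}

int main(void)
{
    struct toy_pgn *fifo = NULL;
    int victim;

    toy_enlist(&fifo, 3);
    toy_enlist(&fifo, 7);
    toy_victim(&fifo, &victim);
    printf("victim page = %d\n", victim);  /* prints 3, the oldest page */
    return 0;
}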

/*get_free_vmrg_area - get a free vm region
 *@caller: caller
 *@vmaid: ID vm area to alloc memory region
 *@size: allocated size
 *
 */
int get_free_vmrg_area(struct pcb_t *caller, int vmaid, int size, struct vm_rg_struct *newrg)
{
  struct vm_area_struct *cur_vma = get_vma_by_num(caller->mm, vmaid);

  struct vm_rg_struct *rgit = cur_vma->vm_freerg_list;

  if (rgit == NULL)
    return -1;

  /* Probe uninitialized newrg */
  newrg->rg_start = newrg->rg_end = -1;

  /* Traverse the list of free vm regions to find a fitting space (first fit) */
  while (rgit != NULL)
  {
    if (rgit->rg_start + size <= rgit->rg_end)
    { /* Current region has enough space */
      newrg->rg_start = rgit->rg_start;
      newrg->rg_end = rgit->rg_start + size;

      /* Update left space in chosen region */
      if (rgit->rg_start + size < rgit->rg_end)
      {
        rgit->rg_start = rgit->rg_start + size;
      }
      else
      { /* Used up all space, remove current node */
        /* Clone next rg node */
        struct vm_rg_struct *nextrg = rgit->rg_next;

        /* Cloning */
        if (nextrg != NULL)
        {
          rgit->rg_start = nextrg->rg_start;
          rgit->rg_end = nextrg->rg_end;

          rgit->rg_next = nextrg->rg_next;

          free(nextrg);
        }
        else
        { /* End of free list */
          rgit->rg_start = rgit->rg_end; // dummy, size 0 region
          rgit->rg_next = NULL;
        }
      }

      /* A fitting region has been carved out; stop searching */
      break;
    }
    else
    {
      rgit = rgit->rg_next; // Traverse next rg
    }
  }

  if (newrg->rg_start == -1) // new region not found
    return -1;

  return 0;
}

//#endif

Code of mm-vm.c
