https://www.cse.iitb.ac.in/~mythili/os/labs/lab-xv6-mem/xv6-mem.pdf
vm.c
439 int numpp(void) {
440 pte_t *pte;
441 int cnt = 0; //number of physical page
442 struct proc *curproc = myproc();
443
444 uint sz = curproc->sz;
445 pde_t *pgdir = curproc->pgdir;
446
447 uint a = 0;
448 sz = PGROUNDUP(sz);
449
450 for(;a <= sz; a+=PGSIZE) {
451 pte = walkpgdir(pgdir, (char*)a, 0); //0 flag to not newly allocate page table
452 if(*pte & PTE_P){
453 cnt++;
454 }
455 }
456
457 return cnt;
458 }
vm.c
464 int mmap(int n) {
465 if(n < 0)
466 return 0;
467
468 struct proc *curproc = myproc();
469 pde_t *pgdir = curproc->pgdir;
470
471 int addr = curproc->sz;
472 uint old = PGROUNDUP(addr);
473 uint new = (curproc->sz)+n;
474 for(;old < new; old+=PGSIZE) {
475 walkpgdir(pgdir, (char*)old, 1); //set flag 1 to allocate new page table
476 }
477 curproc->sz += n;
478 switchuvm(curproc); //switchuvm to apply modification of the page directory
479 return addr;
480 }
trap.c
I wrote the trap handler for segmentation faults.
When a page-fault exception occurs, the trap dispatcher invokes the handler routine pgflt().
37 void
38 trap(struct trapframe *tf)
39 {
...
50 switch(tf->trapno){
51 case T_PGFLT:
52 pgflt();
53 break;
482 void pgflt(void) {
483 uint val = rcr2(); //control register 2 stores virtual address that causes page fault
484 struct proc *curproc = myproc();
485 char *mem;
486 uint pa;
487
488 char *addr = (char*)PGROUNDDOWN(val);
489
490 pte_t *pte;
491 pte = walkpgdir(curproc->pgdir, addr, 0);
492
493 mem = kalloc(); //kalloc() to newly allocate the physical page
494 if(mem == 0) {
495 cprintf("allocuvm out of memory\n");
496 return;
497 }
498 memset(mem,0,PGSIZE);
499 pa = V2P(mem);
500 int perm = PTE_W | PTE_U;
501 *pte = pa | perm | PTE_P;
502
503 // switchuvm(curproc); //didn't change page directory.
504 }
A copy-on-write (CoW) fork lets the parent and child share the same memory image initially, and makes a private copy of a page only when one of them writes to it.
To do this, we need the following steps.
// One reference count per physical page frame, indexed by PPX(pa).
// NOTE(review): presumably guarded by kmem.lock — confirm every caller
// of get/incre/decre_refcnt holds it (kfree/kalloc do).
12 uint refcnt[PPX(PHYSTOP)];
...
26 uint get_refcnt(uint a) {
27
28 return refcnt[PPX(a)];
29 }
30
31 void decre_refcnt(uint a) {
32 refcnt[PPX(a)]--;
33 }
34
35 void incre_refcnt(uint a) {
36 refcnt[PPX(a)]++;
37 }
...
59 void
60 freerange(void *vstart, void *vend)
61 {
62 char *p;
63 p = (char*)PGROUNDUP((uint)vstart);
64 for(; p + PGSIZE <= (char*)vend; p += PGSIZE) {
65 refcnt[PPX(V2P(p))] = 0; //all the initial free page should have refcnt of 0.
66 kfree(p);
67 }
68 }
...
74 void
75 kfree(char *v)
76 {
77 struct run *r;
78
79 if((uint)v % PGSIZE || v < end || V2P(v) >= PHYSTOP)
80 panic("kfree");
81
82 if(kmem.use_lock)
83 acquire(&kmem.lock);
84
85 if(get_refcnt(V2P(v)) > 0) {
86 decre_refcnt(V2P(v));
87 }
88
89 if (get_refcnt(V2P(v)) == 0) {
90 // Fill with junk to catch dangling refs.
91 memset(v, 1, PGSIZE);
92
93 r = (struct run*)v;
94 r->next = kmem.freelist;
95 kmem.freelist = r;
96 }
97
98 if(kmem.use_lock)
99 release(&kmem.lock);
100 }
...
105 char*
106 kalloc(void)
107 {
108 struct run *r;
109
110 if(kmem.use_lock) {
111 acquire(&kmem.lock);
112 }
113
114 r = kmem.freelist;
115 if(r) {
116 kmem.freelist = r->next;
117 incre_refcnt(V2P(r));
118 }
119
120
121
122 if(kmem.use_lock) {
123 release(&kmem.lock);
124 }
125
126 return (char*)r;
127 }
316 pde_t*
317 copyuvm(pde_t *pgdir, uint sz)
318 {
319 pde_t *d;
320 pte_t *pte;
321 uint pa, i, flags;
322 // char *mem;
323
324 if((d = setupkvm()) == 0)
325 return 0;
326 for(i = PGSIZE; i < sz; i += PGSIZE){
327 if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
328 panic("copyuvm: pte should exist");
329 if(!(*pte & PTE_P))
330 panic("copyuvm: page not present");
331 *pte &= (~PTE_W); //disable write permission on the page
332 pa = PTE_ADDR(*pte);
333 flags = PTE_FLAGS(*pte);
334 // if((mem = kalloc()) == 0)
335 // goto bad;
336 // memmove(mem, (char*)P2V(pa), PGSIZE);
337 incre_refcnt(pa); //must provide without V2P because pa is already physical address
338 if(mappages(d, (void*)i, PGSIZE, pa, flags) < 0) { //initially shares the physical me
mory
339 // kfree(mem);
340 goto bad;
341 }
342
343 }
344 lcr3(V2P(pgdir));
345 return d;
346
347 bad:
348 freevm(d);
349 return 0;
350 }
487 void pgflt(void) {
488 uint va = rcr2(); //control register 2 stores virtual address that causes page fault
489
490 struct proc *curproc = myproc();
491 pte_t *pte;
492 pte = walkpgdir(curproc->pgdir, (char*)va, 0);
493 uint pa = PTE_ADDR(*pte);
494 uint rc = get_refcnt(pa);
495
496 if(rc > 1) {
497 char *mem;
498 if((mem = kalloc()) == 0) {
499 return;
500 }
501 memmove(mem, (char*)P2V(pa), PGSIZE);
502 *pte = V2P(mem) | PTE_P | PTE_W | PTE_U;
503 decre_refcnt(pa);
504 }
505
506 else if (rc == 1) { //if the last process traps into the page fault, just enabling the
write permission
507 *pte |= PTE_W;
508 }
509
510
511 lcr3(V2P(curproc->pgdir));
512 }
https://www.cse.iitb.ac.in/~mythili/os/labs/lab-xv6-sync/xv6-sync.pdf