As soon as the upper part of the page fault handler is executed w/o
KERNEL_LOCK(), uvm_anfree_list() will also be executed without it. To not corrupt the value of `uvmexp.swpgonly' counter, use atomic operations to modify it. ok? Index: uvm/uvm_anon.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_anon.c,v retrieving revision 1.51 diff -u -p -r1.51 uvm_anon.c --- uvm/uvm_anon.c 19 Jan 2021 13:21:36 -0000 1.51 +++ uvm/uvm_anon.c 24 Feb 2021 09:48:41 -0000 @@ -120,9 +120,9 @@ uvm_anfree_list(struct vm_anon *anon, st } } else { if (anon->an_swslot != 0) { - /* this page is no longer only in swap. */ + /* This page is no longer only in swap. */ KASSERT(uvmexp.swpgonly > 0); - uvmexp.swpgonly--; + atomic_dec_int(&uvmexp.swpgonly); } } anon->an_lock = NULL; Index: uvm/uvm_aobj.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v retrieving revision 1.90 diff -u -p -r1.90 uvm_aobj.c --- uvm/uvm_aobj.c 11 Jan 2021 18:51:09 -0000 1.90 +++ uvm/uvm_aobj.c 24 Feb 2021 09:50:39 -0000 @@ -381,7 +381,7 @@ uao_free(struct uvm_aobj *aobj) * this page is no longer * only in swap. */ - uvmexp.swpgonly--; + atomic_dec_int(&uvmexp.swpgonly); } next = LIST_NEXT(elt, list); @@ -400,7 +400,7 @@ uao_free(struct uvm_aobj *aobj) if (slot) { uvm_swap_free(slot, 1); /* this page is no longer only in swap. 
*/ - uvmexp.swpgonly--; + atomic_dec_int(&uvmexp.swpgonly); } } free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int)); @@ -1549,6 +1549,6 @@ uao_dropswap_range(struct uvm_object *uo */ if (swpgonlydelta > 0) { KASSERT(uvmexp.swpgonly >= swpgonlydelta); - uvmexp.swpgonly -= swpgonlydelta; + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); } } Index: uvm/uvm_km.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_km.c,v retrieving revision 1.139 diff -u -p -r1.139 uvm_km.c --- uvm/uvm_km.c 15 Dec 2020 22:14:42 -0000 1.139 +++ uvm/uvm_km.c 24 Feb 2021 09:52:19 -0000 @@ -242,6 +242,7 @@ uvm_km_pgremove(struct uvm_object *uobj, struct vm_page *pp; voff_t curoff; int slot; + int swpgonlydelta = 0; KASSERT(uobj->pgops == &aobj_pager); @@ -262,8 +263,13 @@ uvm_km_pgremove(struct uvm_object *uobj, uvm_pagefree(pp); uvm_unlock_pageq(); } else if (slot != 0) { - uvmexp.swpgonly--; + swpgonlydelta++; } + } + + if (swpgonlydelta > 0) { + KASSERT(uvmexp.swpgonly >= swpgonlydelta); + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); } } Index: uvm/uvm_pdaemon.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v retrieving revision 1.88 diff -u -p -r1.88 uvm_pdaemon.c --- uvm/uvm_pdaemon.c 24 Nov 2020 13:49:09 -0000 1.88 +++ uvm/uvm_pdaemon.c 24 Feb 2021 09:53:48 -0000 @@ -485,7 +485,7 @@ uvmpd_scan_inactive(struct pglist *pglst if (p->pg_flags & PG_CLEAN) { if (p->pg_flags & PQ_SWAPBACKED) { /* this page now lives only in swap */ - uvmexp.swpgonly++; + atomic_inc_int(&uvmexp.swpgonly); } /* zap all mappings with pmap_page_protect... */ @@ -963,7 +963,7 @@ uvmpd_drop(struct pglist *pglst) if (p->pg_flags & PG_CLEAN) { if (p->pg_flags & PQ_SWAPBACKED) { /* this page now lives only in swap */ - uvmexp.swpgonly++; + atomic_inc_int(&uvmexp.swpgonly); } /* zap all mappings with pmap_page_protect... 
*/ Index: uvm/uvm_swap.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_swap.c,v retrieving revision 1.148 diff -u -p -r1.148 uvm_swap.c --- uvm/uvm_swap.c 14 Dec 2020 13:29:18 -0000 1.148 +++ uvm/uvm_swap.c 24 Feb 2021 09:55:36 -0000 @@ -1574,14 +1574,14 @@ uvm_swap_get(struct vm_page *page, int s KERNEL_LOCK(); /* this page is (about to be) no longer only in swap. */ - uvmexp.swpgonly--; + atomic_dec_int(&uvmexp.swpgonly); result = uvm_swap_io(&page, swslot, 1, B_READ | ((flags & PGO_SYNCIO) ? 0 : B_ASYNC)); if (result != VM_PAGER_OK && result != VM_PAGER_PEND) { /* oops, the read failed so it really is still only in swap. */ - uvmexp.swpgonly++; + atomic_inc_int(&uvmexp.swpgonly); } KERNEL_UNLOCK(); return (result); Index: uvm/uvmexp.h =================================================================== RCS file: /cvs/src/sys/uvm/uvmexp.h,v retrieving revision 1.8 diff -u -p -r1.8 uvmexp.h --- uvm/uvmexp.h 28 Dec 2020 14:01:23 -0000 1.8 +++ uvm/uvmexp.h 24 Feb 2021 09:56:06 -0000 @@ -41,6 +41,7 @@ * other than the vm system. * * Locks used to protect struct members in this file: + * a atomic operations * I immutable after creation * K kernel lock * F uvm_lock_fpageq @@ -82,7 +83,7 @@ struct uvmexp { int nswapdev; /* number of configured swap devices in system */ int swpages; /* [K] number of PAGE_SIZE'ed swap pages */ int swpginuse; /* number of swap pages in use */ - int swpgonly; /* [K] number of swap pages in use, not also in RAM */ + int swpgonly; /* [a] number of swap pages in use, not also in RAM */ int nswget; /* number of swap pages moved from disk to RAM */ int nanon; /* XXX number total of anon's in system */ int unused05; /* formerly nanonneeded */ |
On 24/02/21(Wed) 11:33, Martin Pieuchot wrote:
> As soon as the upper part of the page fault handler is executed w/o > KERNEL_LOCK(), uvm_anfree_list() will also be executed without it. > > To not corrupt the value of `uvmexp.swpgonly' counter, use atomic > operations to modify it. > > ok? Anyone? > Index: uvm/uvm_anon.c > =================================================================== > RCS file: /cvs/src/sys/uvm/uvm_anon.c,v > retrieving revision 1.51 > diff -u -p -r1.51 uvm_anon.c > --- uvm/uvm_anon.c 19 Jan 2021 13:21:36 -0000 1.51 > +++ uvm/uvm_anon.c 24 Feb 2021 09:48:41 -0000 > @@ -120,9 +120,9 @@ uvm_anfree_list(struct vm_anon *anon, st > } > } else { > if (anon->an_swslot != 0) { > - /* this page is no longer only in swap. */ > + /* This page is no longer only in swap. */ > KASSERT(uvmexp.swpgonly > 0); > - uvmexp.swpgonly--; > + atomic_dec_int(&uvmexp.swpgonly); > } > } > anon->an_lock = NULL; > Index: uvm/uvm_aobj.c > =================================================================== > RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v > retrieving revision 1.90 > diff -u -p -r1.90 uvm_aobj.c > --- uvm/uvm_aobj.c 11 Jan 2021 18:51:09 -0000 1.90 > +++ uvm/uvm_aobj.c 24 Feb 2021 09:50:39 -0000 > @@ -381,7 +381,7 @@ uao_free(struct uvm_aobj *aobj) > * this page is no longer > * only in swap. > */ > - uvmexp.swpgonly--; > + atomic_dec_int(&uvmexp.swpgonly); > } > > next = LIST_NEXT(elt, list); > @@ -400,7 +400,7 @@ uao_free(struct uvm_aobj *aobj) > if (slot) { > uvm_swap_free(slot, 1); > /* this page is no longer only in swap. 
*/ > - uvmexp.swpgonly--; > + atomic_dec_int(&uvmexp.swpgonly); > } > } > free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int)); > @@ -1549,6 +1549,6 @@ uao_dropswap_range(struct uvm_object *uo > */ > if (swpgonlydelta > 0) { > KASSERT(uvmexp.swpgonly >= swpgonlydelta); > - uvmexp.swpgonly -= swpgonlydelta; > + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); > } > } > Index: uvm/uvm_km.c > =================================================================== > RCS file: /cvs/src/sys/uvm/uvm_km.c,v > retrieving revision 1.139 > diff -u -p -r1.139 uvm_km.c > --- uvm/uvm_km.c 15 Dec 2020 22:14:42 -0000 1.139 > +++ uvm/uvm_km.c 24 Feb 2021 09:52:19 -0000 > @@ -242,6 +242,7 @@ uvm_km_pgremove(struct uvm_object *uobj, > struct vm_page *pp; > voff_t curoff; > int slot; > + int swpgonlydelta = 0; > > KASSERT(uobj->pgops == &aobj_pager); > > @@ -262,8 +263,13 @@ uvm_km_pgremove(struct uvm_object *uobj, > uvm_pagefree(pp); > uvm_unlock_pageq(); > } else if (slot != 0) { > - uvmexp.swpgonly--; > + swpgonlydelta++; > } > + } > + > + if (swpgonlydelta > 0) { > + KASSERT(uvmexp.swpgonly >= swpgonlydelta); > + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); > } > } > > Index: uvm/uvm_pdaemon.c > =================================================================== > RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v > retrieving revision 1.88 > diff -u -p -r1.88 uvm_pdaemon.c > --- uvm/uvm_pdaemon.c 24 Nov 2020 13:49:09 -0000 1.88 > +++ uvm/uvm_pdaemon.c 24 Feb 2021 09:53:48 -0000 > @@ -485,7 +485,7 @@ uvmpd_scan_inactive(struct pglist *pglst > if (p->pg_flags & PG_CLEAN) { > if (p->pg_flags & PQ_SWAPBACKED) { > /* this page now lives only in swap */ > - uvmexp.swpgonly++; > + atomic_inc_int(&uvmexp.swpgonly); > } > > /* zap all mappings with pmap_page_protect... 
*/ > @@ -963,7 +963,7 @@ uvmpd_drop(struct pglist *pglst) > if (p->pg_flags & PG_CLEAN) { > if (p->pg_flags & PQ_SWAPBACKED) { > /* this page now lives only in swap */ > - uvmexp.swpgonly++; > + atomic_inc_int(&uvmexp.swpgonly); > } > > /* zap all mappings with pmap_page_protect... */ > Index: uvm/uvm_swap.c > =================================================================== > RCS file: /cvs/src/sys/uvm/uvm_swap.c,v > retrieving revision 1.148 > diff -u -p -r1.148 uvm_swap.c > --- uvm/uvm_swap.c 14 Dec 2020 13:29:18 -0000 1.148 > +++ uvm/uvm_swap.c 24 Feb 2021 09:55:36 -0000 > @@ -1574,14 +1574,14 @@ uvm_swap_get(struct vm_page *page, int s > > KERNEL_LOCK(); > /* this page is (about to be) no longer only in swap. */ > - uvmexp.swpgonly--; > + atomic_dec_int(&uvmexp.swpgonly); > > result = uvm_swap_io(&page, swslot, 1, B_READ | > ((flags & PGO_SYNCIO) ? 0 : B_ASYNC)); > > if (result != VM_PAGER_OK && result != VM_PAGER_PEND) { > /* oops, the read failed so it really is still only in swap. */ > - uvmexp.swpgonly++; > + atomic_inc_int(&uvmexp.swpgonly); > } > KERNEL_UNLOCK(); > return (result); > Index: uvm/uvmexp.h > =================================================================== > RCS file: /cvs/src/sys/uvm/uvmexp.h,v > retrieving revision 1.8 > diff -u -p -r1.8 uvmexp.h > --- uvm/uvmexp.h 28 Dec 2020 14:01:23 -0000 1.8 > +++ uvm/uvmexp.h 24 Feb 2021 09:56:06 -0000 > @@ -41,6 +41,7 @@ > * other than the vm system.
> * > * Locks used to protect struct members in this file: > + * a atomic operations > * I immutable after creation > * K kernel lock > * F uvm_lock_fpageq > @@ -82,7 +83,7 @@ struct uvmexp { > int nswapdev; /* number of configured swap devices in system */ > int swpages; /* [K] number of PAGE_SIZE'ed swap pages */ > int swpginuse; /* number of swap pages in use */ > - int swpgonly; /* [K] number of swap pages in use, not also in RAM */ > + int swpgonly; /* [a] number of swap pages in use, not also in RAM */ > int nswget; /* number of swap pages moved from disk to RAM */ > int nanon; /* XXX number total of anon's in system */ > int unused05; /* formerly nanonneeded */ |
> Date: Wed, 3 Mar 2021 10:27:34 +0100
> From: Martin Pieuchot <[hidden email]> > > On 24/02/21(Wed) 11:33, Martin Pieuchot wrote: > > As soon as the upper part of the page fault handler is executed w/o > > KERNEL_LOCK(), uvm_anfree_list() will also be executed without it. > > > > To not corrupt the value of `uvmexp.swpgonly' counter, use atomic > > operations to modify it. > > > > ok? > > Anyone? ok kettenis@ P.S. I'm still looking at the pagedaemon locking diff; that one doesn't look ok to me at first sight. > > Index: uvm/uvm_anon.c > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvm_anon.c,v > > retrieving revision 1.51 > > diff -u -p -r1.51 uvm_anon.c > > --- uvm/uvm_anon.c 19 Jan 2021 13:21:36 -0000 1.51 > > +++ uvm/uvm_anon.c 24 Feb 2021 09:48:41 -0000 > > @@ -120,9 +120,9 @@ uvm_anfree_list(struct vm_anon *anon, st > > } > > } else { > > if (anon->an_swslot != 0) { > > - /* this page is no longer only in swap. */ > > + /* This page is no longer only in swap. */ > > KASSERT(uvmexp.swpgonly > 0); > > - uvmexp.swpgonly--; > > + atomic_dec_int(&uvmexp.swpgonly); > > } > > } > > anon->an_lock = NULL; > > Index: uvm/uvm_aobj.c > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v > > retrieving revision 1.90 > > diff -u -p -r1.90 uvm_aobj.c > > --- uvm/uvm_aobj.c 11 Jan 2021 18:51:09 -0000 1.90 > > +++ uvm/uvm_aobj.c 24 Feb 2021 09:50:39 -0000 > > @@ -381,7 +381,7 @@ uao_free(struct uvm_aobj *aobj) > > * this page is no longer > > * only in swap. > > */ > > - uvmexp.swpgonly--; > > + atomic_dec_int(&uvmexp.swpgonly); > > } > > > > next = LIST_NEXT(elt, list); > > @@ -400,7 +400,7 @@ uao_free(struct uvm_aobj *aobj) > > if (slot) { > > uvm_swap_free(slot, 1); > > /* this page is no longer only in swap. 
*/ > > - uvmexp.swpgonly--; > > + atomic_dec_int(&uvmexp.swpgonly); > > } > > } > > free(aobj->u_swslots, M_UVMAOBJ, aobj->u_pages * sizeof(int)); > > @@ -1549,6 +1549,6 @@ uao_dropswap_range(struct uvm_object *uo > > */ > > if (swpgonlydelta > 0) { > > KASSERT(uvmexp.swpgonly >= swpgonlydelta); > > - uvmexp.swpgonly -= swpgonlydelta; > > + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); > > } > > } > > Index: uvm/uvm_km.c > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvm_km.c,v > > retrieving revision 1.139 > > diff -u -p -r1.139 uvm_km.c > > --- uvm/uvm_km.c 15 Dec 2020 22:14:42 -0000 1.139 > > +++ uvm/uvm_km.c 24 Feb 2021 09:52:19 -0000 > > @@ -242,6 +242,7 @@ uvm_km_pgremove(struct uvm_object *uobj, > > struct vm_page *pp; > > voff_t curoff; > > int slot; > > + int swpgonlydelta = 0; > > > > KASSERT(uobj->pgops == &aobj_pager); > > > > @@ -262,8 +263,13 @@ uvm_km_pgremove(struct uvm_object *uobj, > > uvm_pagefree(pp); > > uvm_unlock_pageq(); > > } else if (slot != 0) { > > - uvmexp.swpgonly--; > > + swpgonlydelta++; > > } > > + } > > + > > + if (swpgonlydelta > 0) { > > + KASSERT(uvmexp.swpgonly >= swpgonlydelta); > > + atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); > > } > > } > > > > Index: uvm/uvm_pdaemon.c > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v > > retrieving revision 1.88 > > diff -u -p -r1.88 uvm_pdaemon.c > > --- uvm/uvm_pdaemon.c 24 Nov 2020 13:49:09 -0000 1.88 > > +++ uvm/uvm_pdaemon.c 24 Feb 2021 09:53:48 -0000 > > @@ -485,7 +485,7 @@ uvmpd_scan_inactive(struct pglist *pglst > > if (p->pg_flags & PG_CLEAN) { > > if (p->pg_flags & PQ_SWAPBACKED) { > > /* this page now lives only in swap */ > > - uvmexp.swpgonly++; > > + atomic_inc_int(&uvmexp.swpgonly); > > } > > > > /* zap all mappings with pmap_page_protect... 
*/ > > @@ -963,7 +963,7 @@ uvmpd_drop(struct pglist *pglst) > > if (p->pg_flags & PG_CLEAN) { > > if (p->pg_flags & PQ_SWAPBACKED) { > > /* this page now lives only in swap */ > > - uvmexp.swpgonly++; > > + atomic_inc_int(&uvmexp.swpgonly); > > } > > > > /* zap all mappings with pmap_page_protect... */ > > Index: uvm/uvm_swap.c > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvm_swap.c,v > > retrieving revision 1.148 > > diff -u -p -r1.148 uvm_swap.c > > --- uvm/uvm_swap.c 14 Dec 2020 13:29:18 -0000 1.148 > > +++ uvm/uvm_swap.c 24 Feb 2021 09:55:36 -0000 > > @@ -1574,14 +1574,14 @@ uvm_swap_get(struct vm_page *page, int s > > > > KERNEL_LOCK(); > > /* this page is (about to be) no longer only in swap. */ > > - uvmexp.swpgonly--; > > + atomic_dec_int(&uvmexp.swpgonly); > > > > result = uvm_swap_io(&page, swslot, 1, B_READ | > > ((flags & PGO_SYNCIO) ? 0 : B_ASYNC)); > > > > if (result != VM_PAGER_OK && result != VM_PAGER_PEND) { > > /* oops, the read failed so it really is still only in swap. */ > > - uvmexp.swpgonly++; > > + atomic_inc_int(&uvmexp.swpgonly); > > } > > KERNEL_UNLOCK(); > > return (result); > > Index: uvm/uvmexp.h > > =================================================================== > > RCS file: /cvs/src/sys/uvm/uvmexp.h,v > > retrieving revision 1.8 > > diff -u -p -r1.8 uvmexp.h > > --- uvm/uvmexp.h 28 Dec 2020 14:01:23 -0000 1.8 > > +++ uvm/uvmexp.h 24 Feb 2021 09:56:06 -0000 > > @@ -41,6 +41,7 @@ > > * other than the vm system.
> > * > > * Locks used to protect struct members in this file: > > + * a atomic operations > > * I immutable after creation > > * K kernel lock > > * F uvm_lock_fpageq > > @@ -82,7 +83,7 @@ struct uvmexp { > > int nswapdev; /* number of configured swap devices in system */ > > int swpages; /* [K] number of PAGE_SIZE'ed swap pages */ > > int swpginuse; /* number of swap pages in use */ > > - int swpgonly; /* [K] number of swap pages in use, not also in RAM */ > > + int swpgonly; /* [a] number of swap pages in use, not also in RAM */ > > int nswget; /* number of swap pages moved from disk to RAM */ > > int nanon; /* XXX number total of anon's in system */ > > int unused05; /* formerly nanonneeded */ > > |
Free forum by Nabble | Edit this page |