Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
If a page has a backing object that prefers to handle the fault itself,
the locking will be different, so keep it under KERNEL_LOCK() for the
moment and make it separate from the rest of uvm_fault_lower().

ok?

Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.115
diff -u -p -r1.115 uvm_fault.c
--- uvm/uvm_fault.c	16 Feb 2021 09:10:17 -0000	1.115
+++ uvm/uvm_fault.c	16 Feb 2021 10:13:58 -0000
@@ -598,10 +598,37 @@ uvm_fault(vm_map_t orig_map, vaddr_t vad
 			/* case 1: fault on an anon in our amap */
 			error = uvm_fault_upper(&ufi, &flt, anons, fault_type);
 		} else {
-			/* case 2: fault on backing object or zero fill */
-			KERNEL_LOCK();
-			error = uvm_fault_lower(&ufi, &flt, pages, fault_type);
-			KERNEL_UNLOCK();
+			struct uvm_object *uobj = ufi.entry->object.uvm_obj;
+
+			/*
+			 * if the desired page is not shadowed by the amap and
+			 * we have a backing object, then we check to see if
+			 * the backing object would prefer to handle the fault
+			 * itself (rather than letting us do it with the usual
+			 * pgo_get hook). the backing object signals this by
+			 * providing a pgo_fault routine.
+			 */
+			if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
+				KERNEL_LOCK();
+				error = uobj->pgops->pgo_fault(&ufi,
+				    flt.startva, pages, flt.npages,
+				    flt.centeridx, fault_type, flt.access_type,
+				    PGO_LOCKED);
+				KERNEL_UNLOCK();
+
+				if (error == VM_PAGER_OK)
+					error = 0;
+				else if (error == VM_PAGER_REFAULT)
+					error = ERESTART;
+				else
+					error = EACCES;
+			} else {
+				/* case 2: fault on backing obj or zero fill */
+				KERNEL_LOCK();
+				error = uvm_fault_lower(&ufi, &flt, pages,
+				    fault_type);
+				KERNEL_UNLOCK();
+			}
 		}
 	}
 
@@ -1054,26 +1081,6 @@ uvm_fault_lower(struct uvm_faultinfo *uf
 	struct vm_anon *anon = NULL;
 	vaddr_t currva;
 	voff_t uoff;
-
-	/*
-	 * if the desired page is not shadowed by the amap and we have a
-	 * backing object, then we check to see if the backing object would
-	 * prefer to handle the fault itself (rather than letting us do it
-	 * with the usual pgo_get hook). the backing object signals this by
-	 * providing a pgo_fault routine.
-	 */
-	if (uobj != NULL && uobj->pgops->pgo_fault != NULL) {
-		result = uobj->pgops->pgo_fault(ufi, flt->startva, pages,
-		    flt->npages, flt->centeridx, fault_type, flt->access_type,
-		    PGO_LOCKED);
-
-		if (result == VM_PAGER_OK)
-			return (0);		/* pgo_fault did pmap enter */
-		else if (result == VM_PAGER_REFAULT)
-			return ERESTART;	/* try again! */
-		else
-			return (EACCES);
-	}
 
 	/*
 	 * now, if the desired page is not shadowed by the amap and we have
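For readers unfamiliar with the hook: a backing object opts in by filling
the `pgo_fault' member of its uvm_pagerops (the device pager's udv_fault()
in uvm/uvm_device.c is the in-tree example).  The sketch below is
illustrative only -- the "example_*" names are made up and the pgo_fault
prototype is inferred from the call site in the diff above, so treat it
as a rough picture rather than code from this change:

/*
 * Illustrative sketch, not part of the diff: a hypothetical pager that
 * handles its own faults.  All "example_*" names are invented; the
 * prototype mirrors the pgo_fault call site shown above.
 */
#include <sys/param.h>
#include <uvm/uvm.h>

int	example_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
	    int, int, vm_fault_t, vm_prot_t, int);

/*
 * Providing a pgo_fault routine is the signal that makes uvm_fault()
 * take the new KERNEL_LOCK()ed branch instead of uvm_fault_lower().
 * A real pager also fills pgo_reference, pgo_detach, etc.
 */
struct uvm_pagerops example_pagerops = {
	.pgo_fault = example_fault,
};

int
example_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_fault_t fault_type, vm_prot_t access_type,
    int flags)
{
	/*
	 * Enter the mapping itself (typically pmap_enter(9) on device
	 * memory), drop the locks taken by the fault code, and report
	 * back with a VM_PAGER_* code.  With the diff applied,
	 * uvm_fault() translates VM_PAGER_OK to 0, VM_PAGER_REFAULT to
	 * ERESTART and anything else to EACCES.
	 */
	return VM_PAGER_OK;
}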
On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
>
> If a page has a backing object that prefers to handle the fault itself,
> the locking will be different, so keep it under KERNEL_LOCK() for the
> moment and make it separate from the rest of uvm_fault_lower().
>
> ok?

Anyone?
Martin Pieuchot [[hidden email]] wrote:
> On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
> >
> > If a page has a backing object that prefers to handle the fault itself,
> > the locking will be different, so keep it under KERNEL_LOCK() for the
> > moment and make it separate from the rest of uvm_fault_lower().
> >
> > ok?
>
> Anyone?

It makes sense, and works on my workstation...
> Date: Mon, 22 Feb 2021 10:10:21 +0100
> From: Martin Pieuchot <[hidden email]>
>
> On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
> >
> > If a page has a backing object that prefers to handle the fault itself,
> > the locking will be different, so keep it under KERNEL_LOCK() for the
> > moment and make it separate from the rest of uvm_fault_lower().
> >
> > ok?
>
> Anyone?

This diverges from NetBSD; you think that's a good idea?
On 23/02/21(Tue) 00:24, Mark Kettenis wrote:
> > Date: Mon, 22 Feb 2021 10:10:21 +0100
> > From: Martin Pieuchot <[hidden email]>
> >
> > On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > > Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
> > >
> > > If a page has a backing object that prefers to handle the fault itself,
> > > the locking will be different, so keep it under KERNEL_LOCK() for the
> > > moment and make it separate from the rest of uvm_fault_lower().
> > >
> > > ok?
> >
> > Anyone?
>
> This diverges from NetBSD; you think that's a good idea?

Which tree are you looking at?  I'm doing this exactly to reduce
differences with NetBSD.  r1.228 of uvm_fault.c in NetBSD contains this
logic in uvm_fault_internal().
> Date: Tue, 23 Feb 2021 10:07:43 +0100
> From: Martin Pieuchot <[hidden email]>
>
> On 23/02/21(Tue) 00:24, Mark Kettenis wrote:
> > > Date: Mon, 22 Feb 2021 10:10:21 +0100
> > > From: Martin Pieuchot <[hidden email]>
> > >
> > > On 16/02/21(Tue) 11:20, Martin Pieuchot wrote:
> > > > Start by moving the `pgo_fault' handler outside of uvm_fault_lower().
> > > >
> > > > If a page has a backing object that prefers to handle the fault itself,
> > > > the locking will be different, so keep it under KERNEL_LOCK() for the
> > > > moment and make it separate from the rest of uvm_fault_lower().
> > > >
> > > > ok?
> > >
> > > Anyone?
> >
> > This diverges from NetBSD; you think that's a good idea?
>
> Which tree are you looking at?  I'm doing this exactly to reduce
> differences with NetBSD.  r1.228 of uvm_fault.c in NetBSD contains this
> logic in uvm_fault_internal().

Was looking at r1.221.  So go ahead then.