KASSERT() for VOP_*

KASSERT() for VOP_*

Martin Pieuchot
This is mostly the same diff that was backed out months ago, now with the
VOP_CLOSE() case fixed: VOP_CLOSE() can accept a NULL argument instead of
`curproc' when the kernel garbage collects passed file descriptors.
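
For illustration only (not part of the diff), here is a minimal sketch of
the two call patterns the relaxed VOP_CLOSE() assertion allows.  The helper
functions are hypothetical and the usual kernel headers are assumed:

/* Normal close path: always runs in the context of the closing thread. */
int
example_close(struct vnode *vp, int fflag, struct ucred *cred)
{
	return (VOP_CLOSE(vp, fflag, cred, curproc));	/* p == curproc */
}

/*
 * Deferred close while garbage collecting passed file descriptors: there
 * is no meaningful owning thread, so a NULL proc pointer is passed.
 */
int
example_gc_close(struct vnode *vp, int fflag, struct ucred *cred)
{
	return (VOP_CLOSE(vp, fflag, cred, NULL));	/* p == NULL */
}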

The intent is to stop passing a "struct proc *" argument when a function
only ever applies to `curproc'.  The synchronization/locking primitives
required are obviously different depending on whether a CPU can modify the
fields of any thread or only those of the current one.
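
As a rough sketch of the end state this works toward (again, not part of
the diff), a wrapper such as VOP_RECLAIM() could eventually drop the
argument entirely and fetch the current thread itself:

int
VOP_RECLAIM(struct vnode *vp)
{
	struct vop_reclaim_args a;

	a.a_vp = vp;
	a.a_p = curproc;	/* the only value callers may pass today */

	if (vp->v_op->vop_reclaim == NULL)
		return (EOPNOTSUPP);

	return ((vp->v_op->vop_reclaim)(&a));
}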

Index: kern/vfs_vops.c
===================================================================
RCS file: /cvs/src/sys/kern/vfs_vops.c,v
retrieving revision 1.28
diff -u -p -r1.28 vfs_vops.c
--- kern/vfs_vops.c 8 Apr 2020 08:07:51 -0000 1.28
+++ kern/vfs_vops.c 27 Apr 2020 08:10:02 -0000
@@ -145,6 +145,8 @@ VOP_OPEN(struct vnode *vp, int mode, str
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == curproc);
+
  if (vp->v_op->vop_open == NULL)
  return (EOPNOTSUPP);
 
@@ -164,6 +166,7 @@ VOP_CLOSE(struct vnode *vp, int fflag, s
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == NULL || p == curproc);
  ASSERT_VP_ISLOCKED(vp);
 
  if (vp->v_op->vop_close == NULL)
@@ -184,6 +187,7 @@ VOP_ACCESS(struct vnode *vp, int mode, s
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  ASSERT_VP_ISLOCKED(vp);
 
  if (vp->v_op->vop_access == NULL)
@@ -202,6 +206,7 @@ VOP_GETATTR(struct vnode *vp, struct vat
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  if (vp->v_op->vop_getattr == NULL)
  return (EOPNOTSUPP);
 
@@ -219,6 +224,7 @@ VOP_SETATTR(struct vnode *vp, struct vat
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  ASSERT_VP_ISLOCKED(vp);
 
  if (vp->v_op->vop_setattr == NULL)
@@ -282,6 +288,7 @@ VOP_IOCTL(struct vnode *vp, u_long comma
  a.a_cred = cred;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  if (vp->v_op->vop_ioctl == NULL)
  return (EOPNOTSUPP);
 
@@ -300,6 +307,7 @@ VOP_POLL(struct vnode *vp, int fflag, in
  a.a_events = events;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  if (vp->v_op->vop_poll == NULL)
  return (EOPNOTSUPP);
 
@@ -344,6 +352,7 @@ VOP_FSYNC(struct vnode *vp, struct ucred
  a.a_waitfor = waitfor;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  ASSERT_VP_ISLOCKED(vp);
 
  if (vp->v_op->vop_fsync == NULL)
@@ -565,6 +574,7 @@ VOP_INACTIVE(struct vnode *vp, struct pr
  a.a_vp = vp;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  ASSERT_VP_ISLOCKED(vp);
 
  if (vp->v_op->vop_inactive == NULL)
@@ -581,6 +591,7 @@ VOP_RECLAIM(struct vnode *vp, struct pro
  a.a_vp = vp;
  a.a_p = p;
 
+ KASSERT(p == curproc);
  if (vp->v_op->vop_reclaim == NULL)
  return (EOPNOTSUPP);
 

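For clarity, a hypothetical call site (not part of the diff) showing the
contract the assertions enforce: on a DIAGNOSTIC kernel, handing down
anything other than `curproc' (or NULL, for VOP_CLOSE() only) now panics
and flags the caller for conversion:

int
example_getattr(struct vnode *vp, struct vattr *va, struct ucred *cred,
    struct proc *p)
{
	int error;

	/* Fine: the wrapper's KASSERT(p == curproc) holds. */
	error = VOP_GETATTR(vp, va, cred, curproc);
	if (error)
		return (error);

	/* Trips the new assertion whenever `p' is not the current thread. */
	return (VOP_GETATTR(vp, va, cred, p));
}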

Re: KASSERT() for VOP_*

Martin Pieuchot
On 09/09/20(Wed) 08:41, Martin Pieuchot wrote:
> This is mostly the same diff that was backed out months ago, now with
> the VOP_CLOSE() case fixed: VOP_CLOSE() can accept a NULL argument
> instead of `curproc' when the kernel garbage collects passed file
> descriptors.
>
> The intent is to stop passing a "struct proc *" argument when a
> function only ever applies to `curproc'.  The synchronization/locking
> primitives required are obviously different depending on whether a CPU
> can modify the fields of any thread or only those of the current one.

Anyone?
