Patch for inode SLIST conversion

Michael Neumann mneumann at ntecs.de
Fri Jan 18 12:12:35 PST 2008


Hi,

Appended is my first patch — and it wasn't that hard at all. The Projects 
page of the DF wiki mentioned converting the inode i_next field to use a 
SLIST, which is what this patch does. I'm not sure how useful it is :)

I introduced a SLIST_FOREACH_WITH_PREV macro which keeps a pointer to 
the previous element around in each iteration. That's quite useful if 
you want to insert an element somewhere (just break out of the for loop 
and use SLIST_NEXT(prev) = new_element).

Regards,

  Michael
Index: sys/vfs/ufs/inode.h
===================================================================
RCS file: /home/dcvs/src/sys/vfs/ufs/inode.h,v
retrieving revision 1.12
diff -u -r1.12 inode.h
--- sys/vfs/ufs/inode.h	10 Sep 2006 01:26:41 -0000	1.12
+++ sys/vfs/ufs/inode.h	18 Jan 2008 17:34:23 -0000
@@ -81,7 +81,7 @@
  * active, and is put back when the file is no longer being used.
  */
 struct inode {
-	struct inode	*i_next;/* Hash chain */
+	SLIST_ENTRY(inode) i_next;/* Hash chain */
 	struct	vnode  *i_vnode;/* Vnode associated with this inode. */
 	struct	vnode  *i_devvp;/* Vnode for block I/O. */
 	uint32_t i_flag;	/* flags, see below */
Index: sys/vfs/ufs/ufs_ihash.c
===================================================================
RCS file: /home/dcvs/src/sys/vfs/ufs/ufs_ihash.c,v
retrieving revision 1.20
diff -u -r1.20 ufs_ihash.c
--- sys/vfs/ufs/ufs_ihash.c	14 Oct 2006 16:26:40 -0000	1.20
+++ sys/vfs/ufs/ufs_ihash.c	18 Jan 2008 19:24:49 -0000
@@ -51,13 +51,23 @@
 /*
  * Structures associated with inode cacheing.
  */
-static struct inode **ihashtbl;
+SLIST_HEAD(ihashtbl_bucket, inode);
+static struct ihashtbl_bucket *ihashtbl;
 static u_long	ihash;		/* size of hash table - 1 */
 static struct lwkt_token ufs_ihash_token;
 
 #define	INOHASH(device, inum)	(&ihashtbl[(minor(device) + (inum)) & ihash])
 
 /*
+ * Keeps the previous element around while iterating through the list.
+ */
+#define SLIST_FOREACH_WITH_PREV(var, prev, head, field)			\
+	for (((prev) = NULL, (var) = SLIST_FIRST((head)));		\
+	    (var);							\
+	    ((prev) = (var), (var) = SLIST_NEXT((var), field)))
+
+
+/*
  * Initialize inode hash table.
  */
 void
@@ -66,7 +76,7 @@
 	ihash = 16;
 	while (ihash < desiredvnodes)
 		ihash <<= 1;
-	ihashtbl = kmalloc(sizeof(void *) * ihash, M_UFSIHASH, M_WAITOK|M_ZERO);
+	ihashtbl = kmalloc(sizeof(struct ihashtbl_bucket) * ihash, M_UFSIHASH, M_WAITOK|M_ZERO);
 	--ihash;
 	lwkt_token_init(&ufs_ihash_token);
 }
@@ -94,7 +104,8 @@
 	lwkt_tokref ilock;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+
+	SLIST_FOREACH(ip, INOHASH(dev, inum), i_next) {
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
 	}
@@ -122,7 +133,7 @@
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
 loop:
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+	SLIST_FOREACH(ip, INOHASH(dev, inum), i_next) {
 		if (inum != ip->i_number || dev != ip->i_dev)
 			continue;
 		vp = ITOV(ip);
@@ -132,7 +143,7 @@
 		 * We must check to see if the inode has been ripped
 		 * out from under us after blocking.
 		 */
-		for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+		SLIST_FOREACH(ip, INOHASH(dev, inum), i_next) {
 			if (inum == ip->i_number && dev == ip->i_dev)
 				break;
 		}
@@ -159,7 +170,7 @@
 	struct inode *ip;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
-	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
+	SLIST_FOREACH(ip, INOHASH(dev, inum), i_next) {
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
 	}
@@ -173,22 +184,24 @@
 int
 ufs_ihashins(struct inode *ip)
 {
-	struct inode **ipp;
 	struct inode *iq;
+	struct inode *prev;
+	struct ihashtbl_bucket *head; 
 	lwkt_tokref ilock;
 
 	KKASSERT((ip->i_flag & IN_HASHED) == 0);
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
-	ipp = INOHASH(ip->i_dev, ip->i_number);
-	while ((iq = *ipp) != NULL) {
+
+	head = INOHASH(ip->i_dev, ip->i_number);
+	SLIST_FOREACH_WITH_PREV(iq, prev, head, i_next) {
 		if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) {
 			lwkt_reltoken(&ilock);
 			return(EBUSY);
 		}
-		ipp = &iq->i_next;
 	}
-	ip->i_next = NULL;
-	*ipp = ip;
+	if (prev) SLIST_NEXT(prev, i_next) = ip; 
+	else      SLIST_FIRST(head) = ip; 
+	SLIST_NEXT(ip, i_next) = NULL;
 	ip->i_flag |= IN_HASHED;
 	lwkt_reltoken(&ilock);
 	return(0);
@@ -201,20 +214,11 @@
 ufs_ihashrem(struct inode *ip)
 {
 	lwkt_tokref ilock;
-	struct inode **ipp;
-	struct inode *iq;
 
 	lwkt_gettoken(&ilock, &ufs_ihash_token);
 	if (ip->i_flag & IN_HASHED) {
-		ipp = INOHASH(ip->i_dev, ip->i_number);
-		while ((iq = *ipp) != NULL) {
-			if (ip == iq)
-				break;
-			ipp = &iq->i_next;
-		}
-		KKASSERT(ip == iq);
-		*ipp = ip->i_next;
-		ip->i_next = NULL;
+		SLIST_REMOVE(INOHASH(ip->i_dev, ip->i_number), ip, inode, i_next);
+		SLIST_NEXT(ip, i_next) = NULL;
 		ip->i_flag &= ~IN_HASHED;
 	}
 	lwkt_reltoken(&ilock);




More information about the Kernel mailing list