Patch for inode SLIST conversion

Michael Neumann mneumann at ntecs.de
Mon Jan 21 06:46:38 PST 2008


Matthew Dillon wrote:
     Keep in mind that tokens have virtually no acquisition or release
     overhead.  If you read the lwkt_gettoken() code path carefully you
     will notice that it doesn't actually have to do all that much...
     it doesn't even need to get a spinlock once the token is passively
     owned by the originating cpu, so applications which use tokens
     heavily will be running a very optimal code path.  And the token
     tracking structure is declared on the stack, meaning it is virtually
     guaranteed to be in the cpu's L1 cache.
     All the token handling complexity occurs ONLY when a thread blocks
     while holding a token.  This doesn't happen very often and is not
     considered to be a critical code path.  The integration of the
     token code with the LWKT thread scheduler allows the scheduler to
     simply refrain from scheduling threads whose tokens cannot be
     immediately acquired, avoiding the wakeup/return-to-sleep loops
     that other locks have to deal with.  It also gives the tokens a
     very specific operating mechanic that is extremely useful for
     iterating over global system lists without having to worry about
     lock order reversals or anything else.
Just out of interest I implemented Simon's idea (appended). I've used a 
64-bit integer as generation counter to reduce the very low probable 
danger of a wrap-around (very unlikely to happen but possible ;-).

If checks for token staleness are quite common in the kernel, I'd 
suggest using a specific function (like lwkt_token_stale) to
document that behavior with a function instead of a comment, and to
avoid everybody having to roll their own.

If it's too critical to build right into the token/token_ref itself, why
not simply make it a separate structure:
  struct lwkt_stale {
    int64_t s_generation;
  }
  struct lwkt_staleref {
    struct lwkt_stale *sr_stale;
    int64_t sr_generation;
  };
  void lwkt_initstale(lwkt_stale *s, lwkt_staleref *sr)
  {
    sr->sr_stale = s;
    sr->sr_generation = ++s->s_generation;
  }
  int lwkt_is_stale(lwkt_staleref *sr)
  {
    return (sr->sr_generation != sr->sr_stale->s_generation);
  }


  static struct lwkt_stale s;
  void x()
  {
    struct lwkt_staleref sr;
    lwkt_initstale(&s, &sr);

    ...
    block
    ...
    if (lwkt_is_stale(&sr))
    {
      // backup
    }
  }
That's of course not that beautiful to use.
The more important question, is it that useful/common at all?
Regards,

  Michael
--- /tmp/thread.h	2008-01-21 15:22:25.728757019 +0100
+++ sys/thread.h	2008-01-21 15:16:04.689710157 +0100
@@ -106,6 +106,7 @@
     struct spinlock	t_spinlock;	/* Controls access */
     struct thread	*t_owner;	/* The current owner of the token */
     int			t_count;	/* Per-thread count */
+    int64_t		t_generation;	/* Generation count */
 } lwkt_token;
 
 #else
@@ -114,6 +115,7 @@
     struct spinlock	t_unused01;
     struct thread	*t_unused02;
     int			t_globalcount;	/* Global reference count */
+    int64_t		t_generation;	/* Generation count */
 } lwkt_token;
 
 #endif
@@ -122,10 +124,11 @@
     lwkt_token_t	tr_tok;		/* token in question */
     lwkt_tokref_t	tr_next;	/* linked list */
     int			tr_state;	/* 0 = don't have, 1 = have */
+    int64_t		tr_generation;	/* Generation of token when acquired */
 } lwkt_tokref;
 
 #define LWKT_TOKREF_INIT(tok)		\
-			{ tok, NULL, 0 }
+			{ tok, NULL, 0, 0 }
 #define LWKT_TOKREF_DECLARE(name, tok)	\
 			lwkt_tokref name = LWKT_TOKREF_INIT(tok)
 
@@ -361,6 +364,7 @@
 extern void lwkt_drain_token_requests(void);
 extern void lwkt_token_init(lwkt_token_t);
 extern void lwkt_token_uninit(lwkt_token_t);
+extern int lwkt_token_stale(lwkt_tokref_t);
 
 extern void lwkt_token_pool_init(void);
 extern lwkt_token_t lwkt_token_pool_get(void *);
--- /tmp/lwkt_token.c	2008-01-21 15:21:40.765752783 +0100
+++ kern/lwkt_token.c	2008-01-21 15:24:44.900776089 +0100
@@ -168,6 +168,7 @@
 	    KKASSERT(tok->t_count == 0);
 	}
 	++tok->t_count;
+	refs->tr_generation = ++tok->t_generation;
 	refs->tr_state = 1;
     }
     return (TRUE);
@@ -258,6 +259,7 @@
     /* NOTE: 'td' invalid after loop */
     ++tok->t_globalcount;
 #endif
+    ref->tr_generation = ++tok->t_generation;
     ref->tr_state = 1;
 }
 
@@ -313,6 +315,7 @@
     /* NOTE: 'td' invalid after loop */
     ++tok->t_globalcount;
 #endif
+    ref->tr_generation = ++tok->t_generation;
     ref->tr_state = 1;
     return (TRUE);
 }
@@ -325,6 +328,17 @@
     _lwkt_gettokref(ref);
 }
 
+int
+lwkt_token_stale(lwkt_tokref_t ref)
+{
+#ifdef SMP
+    KKASSERT(ref->tr_state == 1 && ref->tr_tok->t_owner == curthread && ref->tr_tok->t_count > 0);
+#else
+    KKASSERT(ref->tr_state == 1 && ref->tr_tok->t_globalcount > 0);
+#endif
+    return (ref->tr_generation != ref->tr_tok->t_generation);
+}
+
 void
 lwkt_gettokref(lwkt_tokref_t ref)
 {




More information about the Kernel mailing list