changeset 20709:fff3c2daf075 draft lockdep

TODO & patches
author Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
date Sun, 22 Mar 2020 11:42:54 -0400
parents c611dfebe78a
children
files TODO patches/1.patch patches/2.patch patches/3.patch patches/4.patch patches/5.patch patches/6.patch patches/7.patch
diffstat 8 files changed, 2352 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TODO	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,38 @@
+- maintain dependency graph
+- keep timestamps of when we locked?
+- recursive may be ok
+  - addr ascending (e.g., pcf); see the sketch below the cpu_lock trace
+- rwlocks?
+- semaphores?
+- mod unload "purging" lock infos?
+  - is there a way to know that a mutex was init'd by a module and that it
+    never escaped the module?  I don't think so.
+- mdb module
+  - walk held locks by a thread
+  - print held locks by a thread (dcmd)
+  - dump dep graph
+- C++ guards in the header file
+- check for held locks during return to userspace
+
+- implicit init'd mutex
+  - cpu_lock
+  - memlist_freelist_mutex
+  - freemem_lock
+  - pcf array pcf_lock
+    - it's an array; when getting more, incr-addr
+- const-ify the mutex args in ldep
+
+
+
+cpu_lock:
+	mutex_enter+0x11()
+	lgrp_mem_init+0x14c(0, babecafe, 0)
+	lgrp_config+0x1c1(7, 0, babecafe)
+	mem_node_add_slice+0xfe(0, 9c)
+	plat_build_mem_nodes+0x178(c11560)
+	startup_build_mem_nodes+0x24(c11560)
+	startup_memlist+0x198()
+	startup+0x35()
+	main+0x5c()
+	_locore_start+0x90()
+
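A minimal sketch of what the address-ascending recursion policy could look like at a
call site such as pcf_acquire_all() (pcf, pcf_fanout, LRP_ADDR_ASC, and
mutex_enter_recursive() all come from patches 3 and 4; the API is still a draft, so
treat this as illustrative rather than the final interface):

	/*
	 * Sketch only: take every pcf[] lock, relying on the array's
	 * ascending addresses to satisfy the LRP_ADDR_ASC policy.
	 */
	void
	pcf_acquire_all(void)
	{
		uint_t i;

		for (i = 0; i < pcf_fanout; i++)
			mutex_enter_recursive(&pcf[i].pcf_lock, LRP_ADDR_ASC);
	}
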
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/1.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,969 @@
+commit 211de396eee327ed1eb5db45aea5a0568ec41363
+Author: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+Date:   Wed Oct 21 13:44:26 2015 -0400
+
+    patch lockdep
+
+diff --git a/usr/src/pkg/manifests/system-header.mf b/usr/src/pkg/manifests/system-header.mf
+index 08f0b19..8630b12 100644
+--- a/usr/src/pkg/manifests/system-header.mf
++++ b/usr/src/pkg/manifests/system-header.mf
+@@ -1178,6 +1178,7 @@ file path=usr/include/sys/llc1.h
+ file path=usr/include/sys/loadavg.h
+ file path=usr/include/sys/localedef.h
+ file path=usr/include/sys/lock.h
++file path=usr/include/sys/lockdep.h
+ file path=usr/include/sys/lockfs.h
+ file path=usr/include/sys/lofi.h
+ file path=usr/include/sys/log.h
+diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
+index cd9da11..b805e03 100644
+--- a/usr/src/uts/common/Makefile.files
++++ b/usr/src/uts/common/Makefile.files
+@@ -213,6 +213,7 @@ GENUNIX_OBJS +=	\
+ 		labelsys.o	\
+ 		link.o		\
+ 		list.o		\
++		lockdep.o	\
+ 		lockstat_subr.o	\
+ 		log_sysevent.o	\
+ 		logsubr.o	\
+diff --git a/kernel/os/kmem.c b/kernel/os/kmem.c
+index cc53c2f..f79fa2d 100644
+--- a/kernel/os/kmem.c
++++ b/kernel/os/kmem.c
+@@ -1440,7 +1440,7 @@ kmem_log_init(size_t logsize)
+ 	    NULL, NULL, VM_SLEEP);
+ 	bzero(lhp, lhsize);
+ 
+-	mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&lhp->lh_lock, "kmem log header", MUTEX_DEFAULT, NULL);
+ 	lhp->lh_nchunks = nchunks;
+ 	lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
+ 	lhp->lh_base = vmem_alloc(kmem_log_arena,
+@@ -1451,7 +1451,8 @@ kmem_log_init(size_t logsize)
+ 
+ 	for (i = 0; i < max_ncpus; i++) {
+ 		kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
+-		mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
++		mutex_init(&clhp->clh_lock, "kmem log header cpu", MUTEX_DEFAULT,
++		    NULL);
+ 		clhp->clh_chunk = i;
+ 	}
+ 
+@@ -3951,7 +3952,7 @@ kmem_cache_create(
+ 	/*
+ 	 * Initialize the rest of the slab layer.
+ 	 */
+-	mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&cp->cache_lock, "kmem cache", MUTEX_DEFAULT, NULL);
+ 
+ 	avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
+ 	    sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
+@@ -3973,7 +3974,7 @@ kmem_cache_create(
+ 	/*
+ 	 * Initialize the depot.
+ 	 */
+-	mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&cp->cache_depot_lock, "kmem depot", MUTEX_DEFAULT, NULL);
+ 
+ 	for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
+ 		continue;
+@@ -3985,7 +3986,7 @@ kmem_cache_create(
+ 	 */
+ 	for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
+ 		kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
+-		mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
++		mutex_init(&ccp->cc_lock, "kmem cpu", MUTEX_DEFAULT, NULL);
+ 		ccp->cc_flags = cp->cache_flags;
+ 		ccp->cc_rounds = -1;
+ 		ccp->cc_prounds = -1;
+diff --git a/kernel/os/lockdep.c b/kernel/os/lockdep.c
+new file mode 100644
+index 0000000..edc8d8d
+--- /dev/null
++++ b/kernel/os/lockdep.c
+@@ -0,0 +1,588 @@
++#include <sys/lockdep.h>
++#include <sys/thread.h>
++#include <sys/types.h>
++#include <sys/sysmacros.h>
++#include <sys/sunddi.h>
++#include <sys/avl.h>
++#include <sys/list.h>
++
++#define LOCKDEP_DEP_LIST_SIZE		16
++#define LOCKDEP_NUM_INFOS		(16 * 1024)	/* max number of live mutexes */
++#define LOCKDEP_NUM_CLASSES		256	/* max number of classes */
++#define LOCKDEP_CLASS_NAME_LEN		32	/* longest possible class name */
++
++#define	KEEP_GOING
++
++/* we never saw a mutex_init */
++#define UNKNOWN_LOCK_CLASS		"<unknown>"
++/* we got a NULL class name in mutex_init */
++#define NULL_LOCK_CLASS			"<null>"
++
++struct lock_class {
++	union {
++		avl_node_t	tree;
++		list_node_t	list;
++	} lc_node;
++
++	char			lc_name[LOCKDEP_CLASS_NAME_LEN];
++	int			lc_ndeps;
++#if 0
++	struct lock_class	*lc_deps[LOCKDEP_DEP_LIST_SIZE];
++#endif
++};
++
++struct lock_info {
++	union {
++		avl_node_t	tree;
++		list_node_t	list;
++	} li_node;
++
++	struct lock_class	*li_class;
++	kmutex_t		*li_lock;
++	boolean_t		li_implicit_init;
++};
++
++static int lockdep_enabled;
++static avl_tree_t lockinfo;
++static list_t lockinfo_freelist;
++static struct lock_info lockinfo_slab[LOCKDEP_NUM_INFOS];
++static avl_tree_t lockclass;
++static list_t lockclass_freelist;
++static struct lock_class lockclass_slab[LOCKDEP_NUM_CLASSES];
++static kmutex_t lockdep_lock;
++static char lockdep_msgbuf[1024];
++static char *lockdep_msgbuf_ptr;
++
++#define MSG(...)	lockdep_msgbuf_ptr += snprintf(lockdep_msgbuf_ptr, \
++				sizeof(lockdep_msgbuf) - (lockdep_msgbuf_ptr - lockdep_msgbuf), \
++				__VA_ARGS__)
++
++/*
++ * There are various errors we may encounter.  Each of the following
++ * functions deals with a different error.
++ */
++static void
++__error_print_lock_info(struct lock_info *li)
++{
++
++	MSG(" %p (%s)\n", li->li_lock, li->li_class->lc_name);
++}
++
++static void
++error_mutex_reinitialization(kmutex_t *mutex, char *name)
++{
++	MSG("mutex_init(%p, \"%s\", ...) called on an already initialized "
++	    "mutex\n", mutex, name);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++__error_mutex_enter_heading(struct lock_info *li)
++{
++	MSG("thread %p is trying to acquire lock:\n", curthread);
++	__error_print_lock_info(li);
++}
++
++static void
++error_mutex_enter_unknown(kmutex_t *mutex)
++{
++	MSG("mutex_enter(%p) called on an unknown (unintialized?) lock\n",
++	    mutex);
++	MSG("Assuming adaptive mutex of class \"%s\"\n",
++	    UNKNOWN_LOCK_CLASS);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_enter_deadlock(struct lock_info *li, struct held_lock *hl)
++{
++	MSG("possible deadlock detected\n");
++	__error_mutex_enter_heading(li);
++	MSG("but thread is already holding it:\n");
++	__error_print_lock_info(hl->hl_info);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_enter_recursive(struct lock_info *li, struct held_lock *hl)
++{
++	MSG("possible recursive locking detected\n");
++	__error_mutex_enter_heading(li);
++	MSG("but thread is already holding lock:\n");
++	__error_print_lock_info(hl->hl_info);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_exit_unknown(kmutex_t *mutex)
++{
++	MSG("thread %p is trying to release an unknown mutex %p\n",
++	    curthread, mutex);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_exit_notowner(struct lock_info *li)
++{
++	MSG("thread %p is trying to release a mutex it doesn't own:\n",
++	    curthread);
++	__error_print_lock_info(li);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_destroy_unknown(kmutex_t *mutex)
++{
++	MSG("thread %p is trying to destroy an unknown mutex %p\n",
++	    curthread, mutex);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_held_stack_overflow(struct lock_info *li)
++{
++	MSG("thread %p tried to hold onto too many locks (%d max)\n", curthread,
++	    LOCKDEP_HELD_STACK_SIZE);
++	MSG("lock: %p name: \"%s\"\n", li->li_lock, li->li_class->lc_name);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++/*
++ * The core part of lock dep.
++ */
++static int
++lockinfo_cmp(const void *a, const void *b)
++{
++	const struct lock_info *lia = a;
++	const struct lock_info *lib = b;
++	uintptr_t va = (uintptr_t) lia->li_lock;
++	uintptr_t vb = (uintptr_t) lib->li_lock;
++
++	if (va < vb)
++		return (-1);
++	if (va > vb)
++		return (1);
++	return (0);
++}
++
++static int
++lockclass_cmp(const void *a, const void *b)
++{
++	const struct lock_class *lca = a;
++	const struct lock_class *lcb = b;
++	int ret;
++
++	ret = strncmp(lca->lc_name, lcb->lc_name, LOCKDEP_CLASS_NAME_LEN);
++	if (ret < 0)
++		return (-1);
++	if (ret > 0)
++		return (1);
++	return (0);
++}
++
++static struct lock_info *
++alloc_lock_info(kmutex_t *mutex, struct lock_class *lc)
++{
++	struct lock_info *li;
++
++	li = list_remove_head(&lockinfo_freelist);
++	if (!li)
++		return (NULL);
++
++	li->li_lock = mutex;
++	li->li_class = lc;
++
++	avl_add(&lockinfo, li);
++
++	return (li);
++}
++
++static void
++free_lock_info(struct lock_info *li)
++{
++	avl_remove(&lockinfo, li);
++
++	list_insert_head(&lockinfo_freelist, li);
++}
++
++static struct lock_class *
++alloc_lock_class(char *name)
++{
++	struct lock_class *lc;
++
++	lc = list_remove_head(&lockclass_freelist);
++	if (!lc)
++		return (NULL);
++
++	if (!name)
++		name = NULL_LOCK_CLASS;
++
++	(void) strlcpy(lc->lc_name, name, LOCKDEP_CLASS_NAME_LEN);
++	lc->lc_ndeps = 0;
++
++	avl_add(&lockclass, lc);
++
++	return (lc);
++}
++
++static struct held_lock *
++alloc_held_lock(struct lock_info *li)
++{
++	struct held_lock *hl;
++
++	curthread->t_nheldlocks++;
++
++	if (curthread->t_nheldlocks == LOCKDEP_HELD_STACK_SIZE) {
++		error_held_stack_overflow(li);
++		return (NULL);
++	}
++
++	hl = &curthread->t_heldlocks[curthread->t_nheldlocks - 1];
++
++	hl->hl_info = li;
++	hl->hl_stacktrace_size = getpcstack(hl->hl_stacktrace,
++	    LOCKDEP_HELD_STACKTRACE_SIZE);
++
++	return (hl);
++}
++
++static struct lock_info *
++lookup_lock_info(kmutex_t *mutex)
++{
++	struct lock_info key;
++	struct lock_info *li;
++
++	key.li_lock = mutex;
++
++	return (avl_find(&lockinfo, &key, NULL));
++}
++
++static struct lock_class *
++lookup_lock_class(char *name)
++{
++	struct lock_class key;
++
++	if (!name)
++		name = NULL_LOCK_CLASS;
++
++	(void) strlcpy(key.lc_name, name, LOCKDEP_CLASS_NAME_LEN);
++
++	return (avl_find(&lockclass, &key, NULL));
++}
++
++void
++lockdep_init()
++{
++	int i;
++
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++
++	avl_create(&lockinfo, lockinfo_cmp, sizeof (struct lock_info),
++		   offsetof(struct lock_info, li_node.tree));
++	list_create(&lockinfo_freelist, sizeof (struct lock_info),
++		    offsetof(struct lock_info, li_node.list));
++
++	avl_create(&lockclass, lockclass_cmp, sizeof (struct lock_class),
++		   offsetof(struct lock_class, lc_node.tree));
++	list_create(&lockclass_freelist, sizeof (struct lock_class),
++		    offsetof(struct lock_class, lc_node.list));
++
++	for (i = 0; i < LOCKDEP_NUM_INFOS; i++)
++		list_insert_head(&lockinfo_freelist, &lockinfo_slab[i]);
++
++	for (i = 0; i < LOCKDEP_NUM_CLASSES; i++)
++		list_insert_head(&lockclass_freelist, &lockclass_slab[i]);
++
++	mutex_init(&lockdep_lock, "lockdep state lock", MUTEX_SPIN, NULL);
++
++	/* set up the NULL and unknown lock classes */
++	(void) alloc_lock_class(NULL_LOCK_CLASS);
++	(void) alloc_lock_class(UNKNOWN_LOCK_CLASS);
++
++	lockdep_enabled = 1;
++}
++
++static struct lock_info *
++lockdep_mutex_init_unlocked(kmutex_t *mutex, char *name, int type, void *arg,
++    boolean_t implicit)
++{
++	struct lock_class *lc;
++	struct lock_info *li;
++
++	lc = lookup_lock_class(name);
++	if (!lc) {
++		lc = alloc_lock_class(name);
++		if (!lc)
++			goto err;
++	}
++
++	/* is this a reinitialization? */
++	li = lookup_lock_info(mutex);
++	if (li) {
++		error_mutex_reinitialization(mutex, name);
++		return (NULL);
++	}
++
++	li = alloc_lock_info(mutex, lc);
++	if (!li)
++		goto err;
++
++	li->li_implicit_init = implicit;
++
++	return (li);
++
++err:
++	MSG("failed to allocate memory\n");
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++
++	return (NULL);
++}
++
++void
++lockdep_mutex_init(kmutex_t *mutex, char *name, int type, void *arg)
++{
++	struct lock_info *li;
++
++	if (mutex == &lockdep_lock)
++		return;
++
++	mutex_enter(&lockdep_lock);
++
++	if (!lockdep_enabled)
++		goto out;
++
++	li = lockdep_mutex_init_unlocked(mutex, name, type, arg, B_FALSE);
++	if (li)
++		goto out;
++
++#ifndef KEEP_GOING
++	lockdep_enabled = 0;
++#endif
++
++out:
++	mutex_exit(&lockdep_lock);
++}
++
++static void
++lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success)
++{
++	struct lock_class *lc;
++	struct lock_info *li;
++	struct held_lock *hl;
++	int i;
++
++	if (mutex == &lockdep_lock)
++		return;
++
++	if (try && !success)
++		return;
++
++	mutex_enter(&lockdep_lock);
++
++	if (!lockdep_enabled)
++		goto out;
++
++	li = lookup_lock_info(mutex);
++	if (!li) {
++		/* probably an implicit mutex_init() */
++		error_mutex_enter_unknown(mutex);
++
++		li = lockdep_mutex_init_unlocked(mutex, UNKNOWN_LOCK_CLASS,
++		    MUTEX_ADAPTIVE, NULL, B_TRUE);
++		if (!li)
++			goto brick;
++	}
++
++	/* check for recursive locking */
++	if (curthread->t_nheldlocks) {
++		struct held_lock *cur;
++
++		for (i = 0; i < curthread->t_nheldlocks; i++) {
++			cur = &curthread->t_heldlocks[i];
++
++			if (cur->hl_info->li_class != li->li_class)
++				continue;
++
++			if (cur->hl_info->li_lock == mutex)
++				error_mutex_enter_deadlock(li, cur);
++			else
++				error_mutex_enter_recursive(li, cur);
++			goto brick;
++		}
++	}
++
++	/* no issues, add the lock we're trying to get to the stack */
++	hl = alloc_held_lock(li);
++	if (!hl)
++		goto err_nomem;
++
++out:
++	mutex_exit(&lockdep_lock);
++	return;
++
++err_nomem:
++	MSG("failed to allocate memory\n");
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++
++brick:
++#ifndef KEEP_GOING
++	lockdep_enabled = 0;
++#endif
++	mutex_exit(&lockdep_lock);
++}
++
++void
++lockdep_mutex_exit(kmutex_t *mutex)
++{
++	struct held_lock *hl;
++	struct lock_info *li;
++	int i;
++
++	if (mutex == &lockdep_lock)
++		return;
++
++	mutex_enter(&lockdep_lock);
++
++	if (!lockdep_enabled)
++		goto out;
++
++	for (i = 0; i < curthread->t_nheldlocks; i++) {
++		hl = &curthread->t_heldlocks[i];
++
++		if (hl->hl_info->li_lock != mutex)
++			continue;
++
++		/*
++		 * Found it.  Remove the held lock information.
++		 */
++		if (i != curthread->t_nheldlocks - 1)
++			memmove(&curthread->t_heldlocks[i],
++				&curthread->t_heldlocks[i + 1],
++				(curthread->t_nheldlocks - i - 1) * sizeof (*hl));
++
++		curthread->t_nheldlocks--;
++		goto out;
++	}
++
++	/*
++	 * We never locked this mutex.  Either, this is a bogus lock (that
++	 * we've never seen) or a different thread locked it and we're
++	 * trying to release it.
++	 */
++
++	li = lookup_lock_info(mutex);
++	if (!li)
++		error_mutex_exit_unknown(mutex);
++	else
++		error_mutex_exit_notowner(li);
++
++#ifndef KEEP_GOING
++	lockdep_enabled = 0;
++#endif
++
++out:
++	mutex_exit(&lockdep_lock);
++}
++
++void
++lockdep_mutex_destroy(kmutex_t *mutex)
++{
++	struct lock_info *li;
++
++	if (mutex == &lockdep_lock)
++		return;
++
++	mutex_enter(&lockdep_lock);
++
++	if (!lockdep_enabled)
++		goto out;
++
++	li = lookup_lock_info(mutex);
++	if (!li) {
++		error_mutex_destroy_unknown(mutex);
++#ifndef KEEP_GOING
++		lockdep_enabled = 0;
++#endif
++		goto out;
++	}
++
++	free_lock_info(li);
++
++out:
++	mutex_exit(&lockdep_lock);
++}
++
++/*
++ * Here, depending on whether or not we're on a debug kernel, we intercept
++ * mutex_enter, etc.
++ */
++#ifdef DEBUG
++extern void do_mutex_init(kmutex_t *, char *, kmutex_type_t, void *);
++extern void do_mutex_destroy(kmutex_t *);
++extern void do_mutex_enter(kmutex_t *);
++extern void do_mutex_exit(kmutex_t *);
++extern int do_mutex_tryenter(kmutex_t *);
++
++void
++mutex_init(kmutex_t *mutex, char *name, kmutex_type_t type, void *ibc)
++{
++	lockdep_mutex_init(mutex, name, type, ibc);
++
++	do_mutex_init(mutex, name, type, ibc);
++}
++
++void
++mutex_destroy(kmutex_t *mutex)
++{
++	lockdep_mutex_destory(mutex);
++
++	do_mutex_destroy(mutex);
++}
++
++void
++mutex_enter(kmutex_t *mutex)
++{
++	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE);
++	do_mutex_enter(mutex);
++}
++
++int
++mutex_tryenter(kmutex_t *mutex)
++{
++	int ret;
++
++	ret = do_mutex_enter(mutex);
++
++	lockdep_mutex_enter_common(mutex, B_TRUE, (boolean_t)ret);
++
++	return (ret);
++}
++
++void
++mutex_exit(kmutex_t *mutex)
++{
++	lockdep_mutex_exit(mutex);
++
++	do_mutex_exit(mutex);
++}
++#else
++/*
++ * Not a debug kernel, just redirect execution to the assembly routines that
++ * actually lock the mutex.
++ */
++#pragma weak mutex_init = do_mutex_init
++#pragma weak mutex_destroy = do_mutex_destroy
++#pragma weak mutex_enter = do_mutex_enter
++#pragma weak mutex_tryenter = do_mutex_tryenter
++#pragma weak mutex_exit = do_mutex_exit
++#endif
+diff --git a/kernel/os/main.c b/kernel/os/main.c
+index 7afc1cf..f7423a3 100644
+--- a/kernel/os/main.c
++++ b/kernel/os/main.c
+@@ -74,6 +74,7 @@
+ #include <sys/stack.h>
+ #include <sys/brand.h>
+ #include <sys/mmapobj.h>
++#include <sys/lockdep.h>
+ 
+ #include <vm/as.h>
+ #include <vm/seg_kmem.h>
+@@ -397,12 +398,15 @@ main(void)
+ 	ASSERT(curthread == CPU->cpu_thread);
+ 	ASSERT_STACK_ALIGNED();
+ 
++	lockdep_init();
++
+ 	/*
+ 	 * We take the ualock until we have completed the startup
+ 	 * to prevent kadmin() from disrupting this work. In particular,
+ 	 * we don't want kadmin() to bring the system down while we are
+ 	 * trying to start it up.
+ 	 */
++	mutex_init(&ualock, "ualock", MUTEX_DEFAULT, NULL);
+ 	mutex_enter(&ualock);
+ 
+ 	/*
+diff --git a/kernel/os/mutex.c b/kernel/os/mutex.c
+index a309ca8..1509f7c 100644
+--- a/kernel/os/mutex.c
++++ b/kernel/os/mutex.c
+@@ -335,7 +335,7 @@ void (*mutex_lock_delay)(uint_t) = default_lock_delay;
+ void (*mutex_delay)(void) = mutex_delay_default;
+ 
+ /*
+- * mutex_vector_enter() is called from the assembly mutex_enter() routine
++ * mutex_vector_enter() is called from the assembly do_mutex_enter() routine
+  * if the lock is held or is not of type MUTEX_ADAPTIVE.
+  */
+ void
+@@ -563,7 +563,7 @@ mutex_owner(const kmutex_t *mp)
+  */
+ /* ARGSUSED */
+ void
+-mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
++do_mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
+ {
+ 	mutex_impl_t *lp = (mutex_impl_t *)mp;
+ 
+@@ -615,7 +615,7 @@ mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
+ }
+ 
+ void
+-mutex_destroy(kmutex_t *mp)
++do_mutex_destroy(kmutex_t *mp)
+ {
+ 	mutex_impl_t *lp = (mutex_impl_t *)mp;
+ 
+diff --git a/kernel/os/vmem.c b/kernel/os/vmem.c
+index a554f8c..35c368f 100644
+--- a/kernel/os/vmem.c
++++ b/kernel/os/vmem.c
+@@ -1471,7 +1471,7 @@ vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
+ 	bzero(vmp, sizeof (vmem_t));
+ 
+ 	(void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
+-	mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&vmp->vm_lock, "vmem arena", MUTEX_DEFAULT, NULL);
+ 	cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
+ 	vmp->vm_cflags = vmflag;
+ 	vmflag &= VM_KMFLAGS;
+diff --git a/kernel/sys/lockdep.h b/kernel/sys/lockdep.h
+new file mode 100644
+index 0000000..eac4f19
+--- /dev/null
++++ b/kernel/sys/lockdep.h
+@@ -0,0 +1,19 @@
++#ifndef _LOCKDEP_H
++#define	_LOCKDEP_H
++
++#define	LOCKDEP_HELD_STACK_SIZE		16
++#define LOCKDEP_HELD_STACKTRACE_SIZE	16
++
++struct lock_info;
++
++struct held_lock {
++	struct lock_info *hl_info;
++
++	/* XXX: more context for the mutex_enter? */
++	pc_t hl_stacktrace[LOCKDEP_HELD_STACKTRACE_SIZE];
++	int hl_stacktrace_size;
++};
++
++extern void lockdep_init();
++
++#endif
+diff --git a/kernel/sys/thread.h b/kernel/sys/thread.h
+index 188230d..b10a468 100644
+--- a/kernel/sys/thread.h
++++ b/kernel/sys/thread.h
+@@ -34,6 +34,7 @@
+ #include <sys/time.h>
+ #include <sys/signal.h>
+ #include <sys/kcpc.h>
++#include <sys/lockdep.h>
+ #if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
+ #include <asm/thread.h>
+ #endif
+@@ -344,6 +345,13 @@ typedef struct _kthread {
+ 	kmutex_t	t_ctx_lock;	/* protects t_ctx in removectx() */
+ 	struct waitq	*t_waitq;	/* wait queue */
+ 	kmutex_t	t_wait_mutex;	/* used in CV wait functions */
++
++	/*
++	 * lockdep related fields
++	 */
++	int		t_nheldlocks;	/* number of held locks */
++	struct held_lock t_heldlocks[LOCKDEP_HELD_STACK_SIZE];
++					/* held locks array */
+ } kthread_t;
+ 
+ /*
+diff --git a/arch/x86/kernel/ml/lock_prim.s b/arch/x86/kernel/ml/lock_prim.s
+index 884ca02..b6d226c 100644
+--- a/arch/x86/kernel/ml/lock_prim.s
++++ b/arch/x86/kernel/ml/lock_prim.s
+@@ -23,8 +23,6 @@
+  * Use is subject to license terms.
+  */
+ 
+-#pragma ident	"%Z%%M%	%I%	%E% SMI"
+-
+ #if defined(lint) || defined(__lint)
+ #include <sys/types.h>
+ #include <sys/thread.h>
+@@ -509,22 +507,22 @@ lock_clear_splx(lock_t *lp, int s)
+ #endif	/* __lint */
+ 
+ /*
+- * mutex_enter() and mutex_exit().
++ * do_mutex_enter() and do_mutex_exit().
+  *
+  * These routines handle the simple cases of mutex_enter() (adaptive
+  * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
+  * If anything complicated is going on we punt to mutex_vector_enter().
+  *
+- * mutex_tryenter() is similar to mutex_enter() but returns zero if
++ * do_mutex_tryenter() is similar to do_mutex_enter() but returns zero if
+  * the lock cannot be acquired, nonzero on success.
+  *
+- * If mutex_exit() gets preempted in the window between checking waiters
++ * If do_mutex_exit() gets preempted in the window between checking waiters
+  * and clearing the lock, we can miss wakeups.  Disabling preemption
+  * in the mutex code is prohibitively expensive, so instead we detect
+  * mutex preemption by examining the trapped PC in the interrupt path.
+- * If we interrupt a thread in mutex_exit() that has not yet cleared
++ * If we interrupt a thread in do_mutex_exit() that has not yet cleared
+  * the lock, cmnint() resets its PC back to the beginning of
+- * mutex_exit() so it will check again for waiters when it resumes.
++ * do_mutex_exit() so it will check again for waiters when it resumes.
+  *
+  * The lockstat code below is activated when the lockstat driver
+  * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
+@@ -535,12 +533,12 @@ lock_clear_splx(lock_t *lp, int s)
+ 
+ /* ARGSUSED */
+ void
+-mutex_enter(kmutex_t *lp)
++do_mutex_enter(kmutex_t *lp)
+ {}
+ 
+ /* ARGSUSED */
+ int
+-mutex_tryenter(kmutex_t *lp)
++do_mutex_tryenter(kmutex_t *lp)
+ { return (0); }
+ 
+ /* ARGSUSED */
+@@ -550,14 +548,14 @@ mutex_adaptive_tryenter(mutex_impl_t *lp)
+ 
+ /* ARGSUSED */
+ void
+-mutex_exit(kmutex_t *lp)
++do_mutex_exit(kmutex_t *lp)
+ {}
+ 
+ #else
+ 
+ #if defined(__amd64)
+ 
+-	ENTRY_NP(mutex_enter)
++	ENTRY_NP(do_mutex_enter)
+ 	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
+ 	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
+ 	lock
+@@ -596,7 +594,7 @@ mutex_exit(kmutex_t *lp)
+ 	movl	$1, %eax			/* return success if tryenter */
+ 	ret
+ 	SET_SIZE(lockstat_wrapper)
+-	SET_SIZE(mutex_enter)
++	SET_SIZE(do_mutex_enter)
+ 
+ /*
+  * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
+@@ -620,7 +618,7 @@ mutex_exit(kmutex_t *lp)
+ 	SET_SIZE(lockstat_wrapper_arg)
+ 
+ 
+-	ENTRY(mutex_tryenter)
++	ENTRY(do_mutex_tryenter)
+ 	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
+ 	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
+ 	lock
+@@ -642,7 +640,7 @@ mutex_exit(kmutex_t *lp)
+ 	movq	%rdi, %rsi
+ 	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
+ 	jmp	lockstat_wrapper
+-	SET_SIZE(mutex_tryenter)
++	SET_SIZE(do_mutex_tryenter)
+ 
+ 	ENTRY(mutex_adaptive_tryenter)
+ 	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
+@@ -695,7 +693,7 @@ mutex_owner_running_critical_size:
+ 
+ 	.globl	mutex_exit_critical_start
+ 
+-	ENTRY(mutex_exit)
++	ENTRY(do_mutex_exit)
+ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movq	%gs:CPU_THREAD, %rdx
+ 	cmpq	%rdx, (%rdi)
+@@ -707,7 +705,7 @@ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movq	%rdi, %rsi
+ 	movl	$LS_MUTEX_EXIT_RELEASE, %edi
+ 	jmp	lockstat_wrapper
+-	SET_SIZE(mutex_exit)
++	SET_SIZE(do_mutex_exit)
+ 
+ 	.globl	mutex_exit_critical_size
+ 	.type	mutex_exit_critical_size, @object
+@@ -718,7 +716,7 @@ mutex_exit_critical_size:
+ 
+ #else
+ 
+-	ENTRY_NP(mutex_enter)
++	ENTRY_NP(do_mutex_enter)
+ 	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
+ 	movl	4(%esp), %ecx			/* ecx = lock ptr */
+ 	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
+@@ -759,7 +757,7 @@ mutex_exit_critical_size:
+ 	popl	%ebp				/* pop off frame */
+ 	ret
+ 	SET_SIZE(lockstat_wrapper)
+-	SET_SIZE(mutex_enter)
++	SET_SIZE(do_mutex_enter)
+ 
+ 	ENTRY(lockstat_wrapper_arg)	/* expects edx=thread, ecx=lock, */
+ 					/* eax=lockstat event, pushed arg */
+@@ -788,7 +786,7 @@ mutex_exit_critical_size:
+ 	SET_SIZE(lockstat_wrapper_arg)
+ 
+ 
+-	ENTRY(mutex_tryenter)
++	ENTRY(do_mutex_tryenter)
+ 	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
+ 	movl	4(%esp), %ecx			/* ecx = lock ptr */
+ 	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
+@@ -810,7 +808,7 @@ mutex_exit_critical_size:
+ #endif	/* OPTERON_WORKAROUND_6323525 */
+ 	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
+ 	jmp	lockstat_wrapper
+-	SET_SIZE(mutex_tryenter)
++	SET_SIZE(do_mutex_tryenter)
+ 
+ 	ENTRY(mutex_adaptive_tryenter)
+ 	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
+@@ -866,7 +864,7 @@ mutex_owner_running_critical_size:
+ 
+ 	.globl	mutex_exit_critical_start
+ 
+-	ENTRY(mutex_exit)
++	ENTRY(do_mutex_exit)
+ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movl	%gs:CPU_THREAD, %edx
+ 	movl	4(%esp), %ecx
+@@ -878,7 +876,7 @@ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	ret
+ 	movl	$LS_MUTEX_EXIT_RELEASE, %eax
+ 	jmp	lockstat_wrapper
+-	SET_SIZE(mutex_exit)
++	SET_SIZE(do_mutex_exit)
+ 
+ 	.globl	mutex_exit_critical_size
+ 	.type	mutex_exit_critical_size, @object
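Patch 1 repurposes the previously ignored name argument of mutex_init() as the
lock-class string that lockdep keys on; patches 2 and 3 then classify existing
callers. A new caller would do the same, roughly as follows (the "foo" names are
purely illustrative, not part of the patch series):

	static kmutex_t foo_cache_lock;

	void
	foo_init(void)
	{
		/* the class name groups all locks of this kind for lockdep */
		mutex_init(&foo_cache_lock, "foo cache", MUTEX_DEFAULT, NULL);
	}
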
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/2.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,219 @@
+commit 63ee60080a2f1f05c909156d3ee936e6c29bbe1e
+Author: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+Date:   Wed Oct 21 13:45:26 2015 -0400
+
+    patch classify-locks-1
+
+diff --git a/kernel/os/brand.c b/kernel/os/brand.c
+index eb8c6e7..071866f 100644
+--- a/kernel/os/brand.c
++++ b/kernel/os/brand.c
+@@ -76,7 +76,7 @@ static kmutex_t brand_list_lock;
+ void
+ brand_init()
+ {
+-	mutex_init(&brand_list_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&brand_list_lock, "brand list", MUTEX_DEFAULT, NULL);
+ 	p0.p_brand = &native_brand;
+ }
+ 
+diff --git a/kernel/os/devcfg.c b/kernel/os/devcfg.c
+index da412f4..8f6cc07 100644
+--- a/kernel/os/devcfg.c
++++ b/kernel/os/devcfg.c
+@@ -381,14 +381,15 @@ sid:		devi->devi_node_attributes |= DDI_PERSISTENT;
+ 	    "i_ddi_alloc_node: name=%s id=%d\n", node_name, devi->devi_nodeid));
+ 
+ 	cv_init(&(devi->devi_cv), NULL, CV_DEFAULT, NULL);
+-	mutex_init(&(devi->devi_lock), NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&(devi->devi_pm_lock), NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&(devi->devi_pm_busy_lock), NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&devi->devi_lock, "devi", MUTEX_DEFAULT, NULL);
++	mutex_init(&devi->devi_pm_lock, "devi pm", MUTEX_DEFAULT, NULL);
++	mutex_init(&devi->devi_pm_busy_lock, "devi pm busy", MUTEX_DEFAULT,
++	    NULL);
+ 
+ 	RIO_TRACE((CE_NOTE, "i_ddi_alloc_node: Initing contract fields: "
+ 	    "dip=%p, name=%s", (void *)devi, node_name));
+ 
+-	mutex_init(&(devi->devi_ct_lock), NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&devi->devi_ct_lock, "devi ct", MUTEX_DEFAULT, NULL);
+ 	cv_init(&(devi->devi_ct_cv), NULL, CV_DEFAULT, NULL);
+ 	devi->devi_ct_count = -1;	/* counter not in use if -1 */
+ 	list_create(&(devi->devi_ct), sizeof (cont_device_t),
+@@ -3281,7 +3282,7 @@ da_log_init()
+ 		logsize = MAX_DEVINFO_LOG_SIZE;
+ 
+ 	dh = kmem_alloc(logsize * PAGESIZE, KM_SLEEP);
+-	mutex_init(&dh->dh_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&dh->dh_lock, "devi log header", MUTEX_DEFAULT, NULL);
+ 	dh->dh_max = ((logsize * PAGESIZE) - sizeof (*dh)) /
+ 	    sizeof (devinfo_audit_t) + 1;
+ 	dh->dh_curr = -1;
+@@ -7504,7 +7505,7 @@ mt_config_init(dev_info_t *pdip, dev_info_t **dipp, int flags,
+ {
+ 	struct mt_config_handle	*hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
+ 
+-	mutex_init(&hdl->mtc_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&hdl->mtc_lock, "MT config handle", MUTEX_DEFAULT, NULL);
+ 	cv_init(&hdl->mtc_cv, NULL, CV_DEFAULT, NULL);
+ 	hdl->mtc_pdip = pdip;
+ 	hdl->mtc_fdip = dipp;
+diff --git a/kernel/os/lgrp.c b/kernel/os/lgrp.c
+index 4fa73dd..e48ce98 100644
+--- a/kernel/os/lgrp.c
++++ b/kernel/os/lgrp.c
+@@ -967,7 +967,7 @@ lgrp_kstat_init(void)
+ {
+ 	lgrp_stat_t	stat;
+ 
+-	mutex_init(&lgrp_kstat_mutex, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&lgrp_kstat_mutex, "lgrp kstat", MUTEX_DEFAULT, NULL);
+ 
+ 	for (stat = 0; stat < LGRP_NUM_STATS; stat++)
+ 		kstat_named_init(&lgrp_kstat_data[stat],
+diff --git a/kernel/os/logsubr.c b/kernel/os/logsubr.c
+index 149f5f8..c7aa991 100644
+--- a/kernel/os/logsubr.c
++++ b/kernel/os/logsubr.c
+@@ -139,7 +139,7 @@ log_makeq(size_t lowat, size_t hiwat, void *ibc)
+ 	q->q_nfsrv = q;
+ 	q->q_lowat = lowat;
+ 	q->q_hiwat = hiwat;
+-	mutex_init(QLOCK(q), NULL, MUTEX_DRIVER, ibc);
++	mutex_init(QLOCK(q), "log queue", MUTEX_DRIVER, ibc);
+ 
+ 	return (q);
+ }
+diff --git a/kernel/os/sched.c b/kernel/os/sched.c
+index c1d6569..ef8840f 100644
+--- a/kernel/os/sched.c
++++ b/kernel/os/sched.c
+@@ -161,7 +161,7 @@ sched()
+ 	callb_cpr_t	cprinfo;
+ 	kmutex_t	swap_cpr_lock;
+ 
+-	mutex_init(&swap_cpr_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&swap_cpr_lock, "swap cpr lock", MUTEX_DEFAULT, NULL);
+ 	CALLB_CPR_INIT(&cprinfo, &swap_cpr_lock, callb_generic_cpr, "sched");
+ 	if (maxslp == 0)
+ 		maxslp = MAXSLP;
+diff --git a/kernel/os/task.c b/kernel/os/task.c
+index b25825e..e5e0127 100644
+--- a/kernel/os/task.c
++++ b/kernel/os/task.c
+@@ -1088,7 +1088,7 @@ task_kstat_delete(task_t *tk)
+ void
+ task_commit_thread_init()
+ {
+-	mutex_init(&task_commit_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&task_commit_lock, "task commit", MUTEX_DEFAULT, NULL);
+ 	cv_init(&task_commit_cv, NULL, CV_DEFAULT, NULL);
+ 	task_commit_thread = thread_create(NULL, 0, task_commit, NULL, 0,
+ 	    &p0, TS_RUN, minclsyspri);
+diff --git a/kernel/os/taskq.c b/kernel/os/taskq.c
+index f11f9cf..8103250 100644
+--- a/kernel/os/taskq.c
++++ b/kernel/os/taskq.c
+@@ -774,7 +774,7 @@ taskq_constructor(void *buf, void *cdrarg, int kmflags)
+ 
+ 	bzero(tq, sizeof (taskq_t));
+ 
+-	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&tq->tq_lock, "taskq", MUTEX_DEFAULT, NULL);
+ 	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
+ 	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
+ 	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
+@@ -1979,8 +1979,8 @@ taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
+ 
+ 		/* Initialize each bucket */
+ 		for (b_id = 0; b_id < bsize; b_id++, bucket++) {
+-			mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
+-			    NULL);
++			mutex_init(&bucket->tqbucket_lock, "taskq bucket",
++			    MUTEX_DEFAULT, NULL);
+ 			cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
+ 			bucket->tqbucket_taskq = tq;
+ 			bucket->tqbucket_freelist.tqent_next =
+diff --git a/kernel/os/timer.c b/kernel/os/timer.c
+index b25a6cb..bbc410d 100644
+--- a/kernel/os/timer.c
++++ b/kernel/os/timer.c
+@@ -552,7 +552,7 @@ timer_create(clockid_t clock, struct sigevent *evp, timer_t *tid)
+ 	 */
+ 	it = kmem_cache_alloc(clock_timer_cache, KM_SLEEP);
+ 	bzero(it, sizeof (itimer_t));
+-	mutex_init(&it->it_mutex, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&it->it_mutex, "itimer", MUTEX_DEFAULT, NULL);
+ 	sigq = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
+ 
+ 	mutex_enter(&p->p_lock);
+diff --git a/kernel/os/zone.c b/kernel/os/zone.c
+index 56c6543..5062f40 100644
+--- a/kernel/os/zone.c
++++ b/kernel/os/zone.c
+@@ -251,6 +251,10 @@
+ #include <vm/seg.h>
+ #include <sys/mac.h>
+ 
++#define	LOCK_CLASS_ZONE		"zone"
++#define	LOCK_CLASS_ZONE_NLWPS	"zone nlwps"
++#define	LOCK_CLASS_ZONE_MEM	"zone mem"
++
+ /*
+  * This constant specifies the number of seconds that threads waiting for
+  * subsystems to release a zone's general-purpose references will wait before
+@@ -1945,8 +1949,8 @@ zone_kstat_delete(zone_t *zone)
+ void
+ zone_zsd_init(void)
+ {
+-	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&zonehash_lock, "zone hash", MUTEX_DEFAULT, NULL);
++	mutex_init(&zsd_key_lock, "zsd key", MUTEX_DEFAULT, NULL);
+ 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
+ 	    offsetof(struct zsd_entry, zsd_linkage));
+ 	list_create(&zone_active, sizeof (zone_t),
+@@ -1954,9 +1958,11 @@ zone_zsd_init(void)
+ 	list_create(&zone_deathrow, sizeof (zone_t),
+ 	    offsetof(zone_t, zone_linkage));
+ 
+-	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&zone0.zone_lock, LOCK_CLASS_ZONE, MUTEX_DEFAULT, NULL);
++	mutex_init(&zone0.zone_nlwps_lock, LOCK_CLASS_ZONE_NLWPS,
++	    MUTEX_DEFAULT, NULL);
++	mutex_init(&zone0.zone_mem_lock, LOCK_CLASS_ZONE_MEM, MUTEX_DEFAULT,
++	    NULL);
+ 	zone0.zone_shares = 1;
+ 	zone0.zone_nlwps = 0;
+ 	zone0.zone_nlwps_ctl = INT_MAX;
+@@ -4290,9 +4296,11 @@ zone_create(const char *zone_name, const char *zone_root,
+ 	zone->zone_restart_init = B_TRUE;
+ 	zone->zone_brand = &native_brand;
+ 	zone->zone_initname = NULL;
+-	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
+-	mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&zone->zone_lock, LOCK_CLASS_ZONE, MUTEX_DEFAULT, NULL);
++	mutex_init(&zone->zone_nlwps_lock, LOCK_CLASS_ZONE_NLWPS,
++	    MUTEX_DEFAULT, NULL);
++	mutex_init(&zone->zone_mem_lock, LOCK_CLASS_ZONE_MEM, MUTEX_DEFAULT,
++	    NULL);
+ 	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
+ 	list_create(&zone->zone_ref_list, sizeof (zone_ref_t),
+ 	    offsetof(zone_ref_t, zref_linkage));
+diff --git a/kernel/vm/seg_vn.c b/kernel/vm/seg_vn.c
+index 7e514ba..ecdfbc5 100644
+--- a/kernel/vm/seg_vn.c
++++ b/kernel/vm/seg_vn.c
+@@ -266,7 +266,7 @@ segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
+ 	struct segvn_data *svd = buf;
+ 
+ 	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
+-	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&svd->segfree_syncmtx, "segvn segfree", MUTEX_DEFAULT, NULL);
+ 	svd->svn_trnext = svd->svn_trprev = NULL;
+ 	return (0);
+ }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/3.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,103 @@
+commit fc598d39fd228d2c114ddef8b8f31d36c84da783
+Author: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+Date:   Wed Oct 21 13:46:26 2015 -0400
+
+    patch classify-locks-2
+
+diff --git a/kernel/os/main.c b/kernel/os/main.c
+index f7423a3..b990b49 100644
+--- a/kernel/os/main.c
++++ b/kernel/os/main.c
+@@ -409,6 +409,8 @@ main(void)
+ 	mutex_init(&ualock, "ualock", MUTEX_DEFAULT, NULL);
+ 	mutex_enter(&ualock);
+ 
++	mutex_init(&cpu_lock, "cpu", MUTEX_DEFAULT, NULL);
++
+ 	/*
+ 	 * Setup root lgroup and leaf lgroup for CPU 0
+ 	 */
+diff --git a/kernel/vm/vm_page.c b/kernel/vm/vm_page.c
+index b5d7850..d962f21 100644
+--- a/kernel/vm/vm_page.c
++++ b/kernel/vm/vm_page.c
+@@ -342,8 +342,9 @@ static void page_demote_vp_pages(page_t *);
+ 
+ void
+ pcf_init(void)
+-
+ {
++	uint_t i;
++
+ 	if (boot_ncpus != -1) {
+ 		pcf_fanout = boot_ncpus;
+ 	} else {
+@@ -368,6 +369,13 @@ pcf_init(void)
+ 		}
+ 	}
+ 	pcf_fanout_mask = pcf_fanout - 1;
++
++	/*
++	 * Initialize the pcf mutexes.
++	 */
++	for (i = 0; i < pcf_fanout; i++)
++		mutex_init(&pcf[i].pcf_lock, "page cache/free", MUTEX_DEFAULT,
++		    NULL);
+ }
+ 
+ /*
+@@ -378,6 +386,9 @@ vm_init(void)
+ {
+ 	boolean_t callb_vm_cpr(void *, int);
+ 
++	mutex_init(&freemem_lock, "freemem", MUTEX_DEFAULT, NULL);
++	mutex_init(&new_freemem_lock, "new freemem", MUTEX_DEFAULT, NULL);
++
+ 	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
+ 	page_init_mem_config();
+ 	page_retire_init();
+@@ -1385,14 +1396,10 @@ get_freemem()
+ void
+ pcf_acquire_all()
+ {
+-	struct pcf	*p;
+-	uint_t		i;
++	uint_t i;
+ 
+-	p = pcf;
+-	for (i = 0; i < pcf_fanout; i++) {
+-		mutex_enter(&p->pcf_lock);
+-		p++;
+-	}
++	for (i = 0; i < pcf_fanout; i++)
++		mutex_enter(&pcf[i].pcf_lock);
+ }
+ 
+ /*
+@@ -1401,14 +1408,10 @@ pcf_acquire_all()
+ void
+ pcf_release_all()
+ {
+-	struct pcf	*p;
+-	uint_t		i;
++	uint_t i;
+ 
+-	p = pcf;
+-	for (i = 0; i < pcf_fanout; i++) {
+-		mutex_exit(&p->pcf_lock);
+-		p++;
+-	}
++	for (i = 0; i < pcf_fanout; i++)
++		mutex_exit(&pcf[i].pcf_lock);
+ }
+ 
+ /*
+@@ -7101,7 +7104,7 @@ page_capture_init()
+ 	pc_thread_shortwait = 23 * hz;
+ 	pc_thread_longwait = 1201 * hz;
+ 	pc_thread_retry = 3;
+-	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
++	mutex_init(&pc_thread_mutex, "page capture thr", MUTEX_DEFAULT, NULL);
+ 	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
+ 	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
+ 	    TS_RUN, minclsyspri);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/4.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,476 @@
+recursion support
+diff --git a/usr/src/uts/common/Makefile.files b/usr/src/uts/common/Makefile.files
+index b805e03..d3fdf26 100644
+--- a/usr/src/uts/common/Makefile.files
++++ b/usr/src/uts/common/Makefile.files
+@@ -59,6 +59,7 @@ COMMON_CORE_OBJS +=		\
+ 		iscsiboot_prop.o	\
+ 		lgrp.o		\
+ 		lgrp_topo.o	\
++		lockdep.o	\
+ 		mmapobj.o	\
+ 		mutex.o		\
+ 		page_lock.o	\
+@@ -213,7 +214,6 @@ GENUNIX_OBJS +=	\
+ 		labelsys.o	\
+ 		link.o		\
+ 		list.o		\
+-		lockdep.o	\
+ 		lockstat_subr.o	\
+ 		log_sysevent.o	\
+ 		logsubr.o	\
+diff --git a/kernel/os/lockdep.c b/kernel/os/lockdep.c
+index edc8d8d..4519cff 100644
+--- a/kernel/os/lockdep.c
++++ b/kernel/os/lockdep.c
+@@ -1,10 +1,10 @@
+-#include <sys/lockdep.h>
+ #include <sys/thread.h>
+ #include <sys/types.h>
+ #include <sys/sysmacros.h>
+ #include <sys/sunddi.h>
+ #include <sys/avl.h>
+ #include <sys/list.h>
++#include <sys/lockdep.h>
+ 
+ #define LOCKDEP_DEP_LIST_SIZE		16
+ #define LOCKDEP_NUM_INFOS		(16 * 1024)	/* max number of live mutexes */
+@@ -18,6 +18,10 @@
+ /* we got a NULL class name in mutex_init */
+ #define NULL_LOCK_CLASS			"<null>"
+ 
++// XXX: define mutex_enter_recursive in a header
++
++#define LAST_ACQUIRED_LOCK()	(&curthread->t_heldlocks[curthread->t_nheldlocks - 1])
++
+ struct lock_class {
+ 	union {
+ 		avl_node_t	tree;
+@@ -68,6 +72,19 @@ __error_print_lock_info(struct lock_info *li)
+ 	MSG(" %p (%s)\n", li->li_lock, li->li_class->lc_name);
+ }
+ 
++static const char *
++__recursion_policy_name(kmutex_rec_pol_t policy)
++{
++	switch (policy) {
++		case LRP_NONE:
++			return "NONE";
++		case LRP_ADDR_ASC:
++			return "ADDR_ASC";
++	}
++
++	return "<invalid>";
++}
++
+ static void
+ error_mutex_reinitialization(kmutex_t *mutex, char *name)
+ {
+@@ -118,6 +135,33 @@ error_mutex_enter_recursive(struct lock_info *li, struct held_lock *hl)
+ }
+ 
+ static void
++error_mutex_enter_policy_mismatch(struct lock_info *li, struct held_lock *hl,
++				  kmutex_rec_pol_t policy)
++{
++	MSG("recursive locking policy mismatch detected\n");
++	__error_mutex_enter_heading(li);
++	MSG("with policy %s (%d), but thread is already holding lock:\n",
++	    __recursion_policy_name(policy), policy);
++	__error_print_lock_info(hl->hl_info);
++	MSG("with policy %s (%d)\n", __recursion_policy_name(hl->hl_policy),
++	    hl->hl_policy);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
++error_mutex_enter_policy_violation(struct lock_info *li, struct held_lock *hl)
++{
++	MSG("recursive locking policy violation detected\n");
++	__error_mutex_enter_heading(li);
++	MSG("with policy %s, but thread is already holding lock:\n",
++	    __recursion_policy_name(hl->hl_policy));
++	__error_print_lock_info(hl->hl_info);
++	debug_enter(lockdep_msgbuf);
++	lockdep_msgbuf_ptr = lockdep_msgbuf;
++}
++
++static void
+ error_mutex_exit_unknown(kmutex_t *mutex)
+ {
+ 	MSG("thread %p is trying to release an unknown mutex %p\n",
+@@ -234,7 +278,7 @@ alloc_lock_class(char *name)
+ }
+ 
+ static struct held_lock *
+-alloc_held_lock(struct lock_info *li)
++alloc_held_lock(struct lock_info *li, kmutex_rec_pol_t policy)
+ {
+ 	struct held_lock *hl;
+ 
+@@ -248,7 +292,9 @@ alloc_held_lock(struct lock_info *li)
+ 	hl = &curthread->t_heldlocks[curthread->t_nheldlocks - 1];
+ 
+ 	hl->hl_info = li;
+-	hl->hl_stacktrace_size = getpcstack(hl->hl_stacktrace,
++	hl->hl_policy = policy;
++	hl->hl_recursion = 1;
++	hl->hl_stacktrace_size = getpcstack((pc_t*)hl->hl_stacktrace,
+ 	    LOCKDEP_HELD_STACKTRACE_SIZE);
+ 
+ 	return (hl);
+@@ -372,8 +418,32 @@ out:
+ 	mutex_exit(&lockdep_lock);
+ }
+ 
++static boolean_t
++check_recursive_policy(struct lock_info *li, struct held_lock *hl)
++{
++	switch (hl->hl_policy) {
++		case LRP_NONE:
++			/* no one should be calling us with the none policy */
++			panic("%s called with LRP_NONE policy", __func__);
++			break;
++		case LRP_ADDR_ASC: {
++			uintptr_t last, this;
++
++			last = hl->hl_rec_last;
++			this = (uintptr_t)li->li_lock;
++
++			if (last > this)
++				return (B_FALSE);
++			break;
++		}
++	}
++
++	return (B_TRUE);
++}
++
+ static void
+-lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success)
++lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
++			   kmutex_rec_pol_t policy)
+ {
+ 	struct lock_class *lc;
+ 	struct lock_info *li;
+@@ -406,6 +476,41 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success)
+ 	if (curthread->t_nheldlocks) {
+ 		struct held_lock *cur;
+ 
++		/* check for ok recursive locking */
++		cur = LAST_ACQUIRED_LOCK();
++		if (cur->hl_info->li_class == li->li_class) {
++			if (cur->hl_policy != policy) {
++				error_mutex_enter_policy_mismatch(li, cur, policy);
++				goto brick;
++			}
++
++			if ((policy != LRP_NONE) &&
++			    (cur->hl_info->li_lock != mutex)) {
++				/*
++				 * someone is trying to lock a different
++				 * lock of the same class as the last
++				 * acquired lock, and is using
++				 * mutex_enter_recursive correctly
++				 */
++				if (!check_recursive_policy(li, cur)) {
++					error_mutex_enter_policy_violation(li, cur);
++					goto brick;
++				}
++
++				/*
++				 * policy was obeyed - update the state of
++				 * the held lock to account for the newly
++				 * held lock
++				 */
++				cur->hl_recursion++;
++				cur->hl_rec_last = (uintptr_t)mutex;
++				goto out;
++			}
++
++			/* just let the deadlock detector below deal with it */
++		}
++
++		/* check for deadlocks & bad recursive locking */
+ 		for (i = 0; i < curthread->t_nheldlocks; i++) {
+ 			cur = &curthread->t_heldlocks[i];
+ 
+@@ -421,7 +526,7 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success)
+ 	}
+ 
+ 	/* no issues, add the lock we're trying to get to the stack */
+-	hl = alloc_held_lock(li);
++	hl = alloc_held_lock(li, policy);
+ 	if (!hl)
+ 		goto err_nomem;
+ 
+@@ -465,12 +570,20 @@ lockdep_mutex_exit(kmutex_t *mutex)
+ 		/*
+ 		 * Found it.  Remove the held lock information.
+ 		 */
+-		if (i != curthread->t_nheldlocks - 1)
+-			memmove(&curthread->t_heldlocks[i],
+-				&curthread->t_heldlocks[i + 1],
+-				(curthread->t_nheldlocks - i - 1) * sizeof (*hl));
+ 
+-		curthread->t_nheldlocks--;
++		if (hl->hl_recursion > 1) {
++			/* recursively held lock */
++			hl->hl_recursion--;
++		} else {
++			/* non-recursive, or last reference of a recursive */
++			if (i != curthread->t_nheldlocks - 1)
++				memmove(&curthread->t_heldlocks[i],
++					&curthread->t_heldlocks[i + 1],
++					(curthread->t_nheldlocks - i - 1) * sizeof (*hl));
++
++			curthread->t_nheldlocks--;
++		}
++
+ 		goto out;
+ 	}
+ 
+@@ -544,7 +657,7 @@ mutex_init(kmutex_t *mutex, char *name, kmutex_type_t type, void *ibc)
+ void
+ mutex_destroy(kmutex_t *mutex)
+ {
+-	lockdep_mutex_destory(mutex);
++	lockdep_mutex_destroy(mutex);
+ 
+ 	do_mutex_destroy(mutex);
+ }
+@@ -552,7 +665,14 @@ mutex_destroy(kmutex_t *mutex)
+ void
+ mutex_enter(kmutex_t *mutex)
+ {
+-	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE);
++	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, LRP_NONE);
++	do_mutex_enter(mutex);
++}
++
++void
++mutex_enter_recursive(kmutex_t *mutex, kmutex_rec_pol_t policy)
++{
++	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, policy);
+ 	do_mutex_enter(mutex);
+ }
+ 
+@@ -561,9 +681,9 @@ mutex_tryenter(kmutex_t *mutex)
+ {
+ 	int ret;
+ 
+-	ret = do_mutex_enter(mutex);
++	ret = do_mutex_tryenter(mutex);
+ 
+-	lockdep_mutex_enter_common(mutex, B_TRUE, (boolean_t)ret);
++	lockdep_mutex_enter_common(mutex, B_TRUE, (boolean_t)ret, LRP_NONE);
+ 
+ 	return (ret);
+ }
+@@ -575,14 +695,4 @@ mutex_exit(kmutex_t *mutex)
+ 
+ 	do_mutex_exit(mutex);
+ }
+-#else
+-/*
+- * Not a debug kernel, just redirect execution to the assembly routines that
+- * actually lock the mutex.
+- */
+-#pragma weak mutex_init = do_mutex_init
+-#pragma weak mutex_destroy = do_mutex_destroy
+-#pragma weak mutex_enter = do_mutex_enter
+-#pragma weak mutex_tryenter = do_mutex_tryenter
+-#pragma weak mutex_exit = do_mutex_exit
+ #endif
+diff --git a/kernel/os/mutex.c b/kernel/os/mutex.c
+index 1509f7c..639458b 100644
+--- a/kernel/os/mutex.c
++++ b/kernel/os/mutex.c
+@@ -561,6 +561,9 @@ mutex_owner(const kmutex_t *mp)
+  *   eg adaptive mutexes created as static within the BSS or allocated
+  *      by kmem_zalloc().
+  */
++#ifndef DEBUG
++#pragma weak mutex_init = do_mutex_init
++#endif
+ /* ARGSUSED */
+ void
+ do_mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
+@@ -614,6 +617,9 @@ do_mutex_init(kmutex_t *mp, char *name, kmutex_type_t type, void *ibc)
+ 	}
+ }
+ 
++#ifndef DEBUG
++#pragma weak mutex_destroy = do_mutex_destroy
++#endif
+ void
+ do_mutex_destroy(kmutex_t *mp)
+ {
+diff --git a/include/sys/lockdep.h b/include/sys/lockdep.h
+index eac4f19..8c5d264 100644
+--- a/include/sys/lockdep.h
++++ b/include/sys/lockdep.h
+@@ -9,8 +9,17 @@ struct lock_info;
+ struct held_lock {
+ 	struct lock_info *hl_info;
+ 
++	/* recursive locking support */
++	kmutex_rec_pol_t hl_policy;
++	int hl_recursion;
++	uintptr_t hl_rec_last;
++
+ 	/* XXX: more context for the mutex_enter? */
+-	pc_t hl_stacktrace[LOCKDEP_HELD_STACKTRACE_SIZE];
++	/*
++	 * really, should be a pc_t, but let's avoid including too many
++	 * headers here
++	 */
++	uintptr_t hl_stacktrace[LOCKDEP_HELD_STACKTRACE_SIZE];
+ 	int hl_stacktrace_size;
+ };
+ 
+diff --git a/include/sys/mutex.h b/include/sys/mutex.h
+index db34243..c811a49 100644
+--- a/include/sys/mutex.h
++++ b/include/sys/mutex.h
+@@ -60,6 +60,11 @@ typedef enum {
+ 	MUTEX_DEFAULT = 6	/* kernel default mutex */
+ } kmutex_type_t;
+ 
++typedef enum {
++	LRP_NONE,		/* not actually recursive */
++	LRP_ADDR_ASC		/* ascending address */
++} kmutex_rec_pol_t;
++
+ typedef struct mutex {
+ #ifdef _LP64
+ 	void	*_opaque[1];
+@@ -88,6 +93,7 @@ typedef struct pad_mutex {
+ extern	void	mutex_init(kmutex_t *, char *, kmutex_type_t, void *);
+ extern	void	mutex_destroy(kmutex_t *);
+ extern	void	mutex_enter(kmutex_t *);
++extern	void	mutex_enter_recursive(kmutex_t *, kmutex_rec_pol_t);
+ extern	int	mutex_tryenter(kmutex_t *);
+ extern	void	mutex_exit(kmutex_t *);
+ extern	int	mutex_owned(const kmutex_t *);
+diff --git a/arch/x86/kernel/ml/lock_prim.s b/arch/x86/kernel/ml/lock_prim.s
+index b6d226c..68f3a90 100644
+--- a/arch/x86/kernel/ml/lock_prim.s
++++ b/arch/x86/kernel/ml/lock_prim.s
+@@ -65,6 +65,15 @@ ulock_try(lock_t *lp)
+ { return (0); }
+ 
+ #else	/* __lint */
++
++#ifdef DEBUG
++#define	ENTRY_NONDEBUG(x)
++#define SET_SIZE_NONDEBUG(x)
++#else
++#define	ENTRY_NONDEBUG(x)	ALTENTRY(x)
++#define SET_SIZE_NONDEBUG(x)	SET_SIZE(x)
++#endif
++
+ 	.globl	kernelbase
+ 
+ #if defined(__amd64)
+@@ -556,6 +565,8 @@ do_mutex_exit(kmutex_t *lp)
+ #if defined(__amd64)
+ 
+ 	ENTRY_NP(do_mutex_enter)
++	ENTRY_NONDEBUG(mutex_enter)
++	ENTRY_NONDEBUG(mutex_enter_recursive)
+ 	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
+ 	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
+ 	lock
+@@ -594,6 +605,8 @@ do_mutex_exit(kmutex_t *lp)
+ 	movl	$1, %eax			/* return success if tryenter */
+ 	ret
+ 	SET_SIZE(lockstat_wrapper)
++	SET_SIZE_NONDEBUG(mutex_enter_recursive)
++	SET_SIZE_NONDEBUG(mutex_enter)
+ 	SET_SIZE(do_mutex_enter)
+ 
+ /*
+@@ -619,6 +632,7 @@ do_mutex_exit(kmutex_t *lp)
+ 
+ 
+ 	ENTRY(do_mutex_tryenter)
++	ENTRY_NONDEBUG(mutex_tryenter)
+ 	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
+ 	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
+ 	lock
+@@ -640,6 +654,7 @@ do_mutex_exit(kmutex_t *lp)
+ 	movq	%rdi, %rsi
+ 	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
+ 	jmp	lockstat_wrapper
++	SET_SIZE_NONDEBUG(mutex_tryenter)
+ 	SET_SIZE(do_mutex_tryenter)
+ 
+ 	ENTRY(mutex_adaptive_tryenter)
+@@ -694,6 +709,7 @@ mutex_owner_running_critical_size:
+ 	.globl	mutex_exit_critical_start
+ 
+ 	ENTRY(do_mutex_exit)
++	ENTRY_NONDEBUG(mutex_exit)
+ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movq	%gs:CPU_THREAD, %rdx
+ 	cmpq	%rdx, (%rdi)
+@@ -705,6 +721,7 @@ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movq	%rdi, %rsi
+ 	movl	$LS_MUTEX_EXIT_RELEASE, %edi
+ 	jmp	lockstat_wrapper
++	SET_SIZE_NONDEBUG(mutex_exit)
+ 	SET_SIZE(do_mutex_exit)
+ 
+ 	.globl	mutex_exit_critical_size
+@@ -717,6 +734,8 @@ mutex_exit_critical_size:
+ #else
+ 
+ 	ENTRY_NP(do_mutex_enter)
++	ENTRY_NONDEBUG(mutex_enter)
++	ENTRY_NONDEBUG(mutex_enter_recursive)
+ 	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
+ 	movl	4(%esp), %ecx			/* ecx = lock ptr */
+ 	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
+@@ -757,6 +776,8 @@ mutex_exit_critical_size:
+ 	popl	%ebp				/* pop off frame */
+ 	ret
+ 	SET_SIZE(lockstat_wrapper)
++	SET_SIZE_NONDEBUG(mutex_enter_recursive)
++	SET_SIZE_NONDEBUG(mutex_enter)
+ 	SET_SIZE(do_mutex_enter)
+ 
+ 	ENTRY(lockstat_wrapper_arg)	/* expects edx=thread, ecx=lock, */
+@@ -787,6 +808,7 @@ mutex_exit_critical_size:
+ 
+ 
+ 	ENTRY(do_mutex_tryenter)
++	ENTRY_NONDEBUG(mutex_tryenter)
+ 	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
+ 	movl	4(%esp), %ecx			/* ecx = lock ptr */
+ 	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
+@@ -808,6 +830,7 @@ mutex_exit_critical_size:
+ #endif	/* OPTERON_WORKAROUND_6323525 */
+ 	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
+ 	jmp	lockstat_wrapper
++	SET_SIZE_NONDEBUG(mutex_tryenter)
+ 	SET_SIZE(do_mutex_tryenter)
+ 
+ 	ENTRY(mutex_adaptive_tryenter)
+@@ -865,6 +888,7 @@ mutex_owner_running_critical_size:
+ 	.globl	mutex_exit_critical_start
+ 
+ 	ENTRY(do_mutex_exit)
++	ENTRY_NONDEBUG(mutex_exit)
+ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	movl	%gs:CPU_THREAD, %edx
+ 	movl	4(%esp), %ecx
+@@ -876,6 +900,7 @@ mutex_exit_critical_start:		/* If interrupted, restart here */
+ 	ret
+ 	movl	$LS_MUTEX_EXIT_RELEASE, %eax
+ 	jmp	lockstat_wrapper
++	SET_SIZE_NONDEBUG(mutex_exit)
+ 	SET_SIZE(do_mutex_exit)
+ 
+ 	.globl	mutex_exit_critical_size
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/5.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,343 @@
+diff --git a/kernel/os/lockdep.c b/kernel/os/lockdep.c
+index 4519cff..466ddd4 100644
+--- a/kernel/os/lockdep.c
++++ b/kernel/os/lockdep.c
+@@ -60,29 +60,40 @@ static char *lockdep_msgbuf_ptr;
+ #define MSG(...)	lockdep_msgbuf_ptr += snprintf(lockdep_msgbuf_ptr, \
+ 				sizeof(lockdep_msgbuf) - (lockdep_msgbuf_ptr - lockdep_msgbuf), \
+ 				__VA_ARGS__)
++#define ENTER_DEBUGGER()	do { \
++					debug_enter(lockdep_msgbuf); \
++					lockdep_msgbuf_ptr = lockdep_msgbuf; \
++				} while (0)
++
++static const char
++__recursion_policy_char(kmutex_rec_pol_t policy)
++{
++	switch (policy) {
++		case LRP_NONE:
++			return '.';
++		case LRP_ADDR_ASC:
++			return '>';
++	}
++
++	return '?';
++}
+ 
+ /*
+  * There are various errors we may encounter.  Each of the following
+  * functions deals with a different error.
+  */
+ static void
+-__error_print_lock_info(struct lock_info *li)
++__error_print_lock_info(struct lock_info *li, kmutex_rec_pol_t policy)
+ {
+-
+-	MSG(" %p (%s)\n", li->li_lock, li->li_class->lc_name);
++	MSG(" %p (%s) <%c%c>\n", li->li_lock, li->li_class->lc_name,
++	    li->li_implicit_init ? 'I' : '.',
++	    __recursion_policy_char(policy));
+ }
+ 
+-static const char *
+-__recursion_policy_name(kmutex_rec_pol_t policy)
++static void
++__error_print_held_lock(struct held_lock *hl)
+ {
+-	switch (policy) {
+-		case LRP_NONE:
+-			return "NONE";
+-		case LRP_ADDR_ASC:
+-			return "ADDR_ASC";
+-	}
+-
+-	return "<invalid>";
++	__error_print_lock_info(hl->hl_info, hl->hl_policy);
+ }
+ 
+ static void
+@@ -90,15 +101,7 @@ error_mutex_reinitialization(kmutex_t *mutex, char *name)
+ {
+ 	MSG("mutex_init(%p, \"%s\", ...) called on an already initialized "
+ 	    "mutex\n", mutex, name);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
+-}
+-
+-static void
+-__error_mutex_enter_heading(struct lock_info *li)
+-{
+-	MSG("thread %p is trying to acquire lock:\n", curthread);
+-	__error_print_lock_info(li);
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+@@ -108,57 +111,33 @@ error_mutex_enter_unknown(kmutex_t *mutex)
+ 	    mutex);
+ 	MSG("Assuming adaptive mutex of class \"%s\"\n",
+ 	    UNKNOWN_LOCK_CLASS);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
+-}
+-
+-static void
+-error_mutex_enter_deadlock(struct lock_info *li, struct held_lock *hl)
+-{
+-	MSG("possible deadlock detected\n");
+-	__error_mutex_enter_heading(li);
+-	MSG("but thread is already holding it:\n");
+-	__error_print_lock_info(hl->hl_info);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+-error_mutex_enter_recursive(struct lock_info *li, struct held_lock *hl)
++error_mutex_msg(const char *op, const char *msg, struct lock_info *li,
++		kmutex_rec_pol_t policy, struct held_lock *hl)
+ {
+-	MSG("possible recursive locking detected\n");
+-	__error_mutex_enter_heading(li);
++	MSG("%s\n", msg);
++	MSG("thread %p is trying to %s lock:\n", curthread, op);
++	__error_print_lock_info(li, policy);
+ 	MSG("but thread is already holding lock:\n");
+-	__error_print_lock_info(hl->hl_info);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	__error_print_held_lock(hl);
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+-error_mutex_enter_policy_mismatch(struct lock_info *li, struct held_lock *hl,
+-				  kmutex_rec_pol_t policy)
+-{
+-	MSG("recursive locking policy mismatch detected\n");
+-	__error_mutex_enter_heading(li);
+-	MSG("with policy %s (%d), but thread is already holding lock:\n",
+-	    __recursion_policy_name(policy), policy);
+-	__error_print_lock_info(hl->hl_info);
+-	MSG("with policy %s (%d)\n", __recursion_policy_name(hl->hl_policy),
+-	    hl->hl_policy);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++error_mutex_enter(const char *msg, struct lock_info *li,
++		  kmutex_rec_pol_t policy, struct held_lock *hl)
++{
++	error_mutex_msg("acquire", msg, li, policy, hl);
+ }
+ 
+ static void
+-error_mutex_enter_policy_violation(struct lock_info *li, struct held_lock *hl)
+-{
+-	MSG("recursive locking policy violation detected\n");
+-	__error_mutex_enter_heading(li);
+-	MSG("with policy %s, but thread is already holding lock:\n",
+-	    __recursion_policy_name(hl->hl_policy));
+-	__error_print_lock_info(hl->hl_info);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++error_mutex_exit_policy_violation(struct lock_info *li, struct held_lock *hl)
++{
++	error_mutex_msg("release", "recursive locking policy violation detected",
++			li, LRP_NONE, hl);
+ }
+ 
+ static void
+@@ -166,8 +145,7 @@ error_mutex_exit_unknown(kmutex_t *mutex)
+ {
+ 	MSG("thread %p is trying to release an unknown mutex %p\n",
+ 	    curthread, mutex);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+@@ -175,9 +153,8 @@ error_mutex_exit_notowner(struct lock_info *li)
+ {
+ 	MSG("thread %p is trying to release a mutex it doesn't own:\n",
+ 	    curthread);
+-	__error_print_lock_info(li);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	__error_print_lock_info(li, LRP_NONE);
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+@@ -185,18 +162,16 @@ error_mutex_destroy_unknown(kmutex_t *mutex)
+ {
+ 	MSG("thread %p is trying to destroy an unknown mutex %p\n",
+ 	    curthread, mutex);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	ENTER_DEBUGGER();
+ }
+ 
+ static void
+-error_held_stack_overflow(struct lock_info *li)
++error_held_stack_overflow(struct lock_info *li, kmutex_rec_pol_t policy)
+ {
+ 	MSG("thread %p tried to hold onto too many locks (%d max)\n", curthread,
+ 	    LOCKDEP_HELD_STACK_SIZE);
+-	MSG("lock: %p name: \"%s\"\n", li->li_lock, li->li_class->lc_name);
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	__error_print_lock_info(li, policy);
++	ENTER_DEBUGGER();
+ }
+ 
+ /*
+@@ -285,7 +260,7 @@ alloc_held_lock(struct lock_info *li, kmutex_rec_pol_t policy)
+ 	curthread->t_nheldlocks++;
+ 
+ 	if (curthread->t_nheldlocks == LOCKDEP_HELD_STACK_SIZE) {
+-		error_held_stack_overflow(li);
++		error_held_stack_overflow(li, policy);
+ 		return (NULL);
+ 	}
+ 
+@@ -387,8 +362,7 @@ lockdep_mutex_init_unlocked(kmutex_t *mutex, char *name, int type, void *arg,
+ 
+ err:
+ 	MSG("failed to allocate memory\n");
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	ENTER_DEBUGGER();
+ 
+ 	return (NULL);
+ }
+@@ -480,7 +454,8 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
+ 		cur = LAST_ACQUIRED_LOCK();
+ 		if (cur->hl_info->li_class == li->li_class) {
+ 			if (cur->hl_policy != policy) {
+-				error_mutex_enter_policy_mismatch(li, cur, policy);
++				error_mutex_enter("recursive locking policy "
++				    "mismatch detected", li, policy, cur);
+ 				goto brick;
+ 			}
+ 
+@@ -493,7 +468,9 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
+ 				 * mutex_enter_recursive correctly
+ 				 */
+ 				if (!check_recursive_policy(li, cur)) {
+-					error_mutex_enter_policy_violation(li, cur);
++					error_mutex_enter("recursive locking "
++					    "policy violation detected", li,
++					    policy, cur);
+ 					goto brick;
+ 				}
+ 
+@@ -518,9 +495,11 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
+ 				continue;
+ 
+ 			if (cur->hl_info->li_lock == mutex)
+-				error_mutex_enter_deadlock(li, cur);
++				error_mutex_enter("possible deadlock detected",
++				    li, policy, cur);
+ 			else
+-				error_mutex_enter_recursive(li, cur);
++				error_mutex_enter("possible recursive locking "
++				    "detected", li, policy, cur);
+ 			goto brick;
+ 		}
+ 	}
+@@ -536,8 +515,7 @@ out:
+ 
+ err_nomem:
+ 	MSG("failed to allocate memory\n");
+-	debug_enter(lockdep_msgbuf);
+-	lockdep_msgbuf_ptr = lockdep_msgbuf;
++	ENTER_DEBUGGER();
+ 
+ brick:
+ #ifndef KEEP_GOING
+@@ -546,6 +524,21 @@ brick:
+ 	mutex_exit(&lockdep_lock);
+ }
+ 
++static void
++do_lockdep_mutex_exit(struct held_lock *hl)
++{
++	if (hl->hl_recursion > 1) {
++		/* recursively held lock */
++		hl->hl_recursion--;
++	} else {
++		/* non-recursive, or last reference of a recursive */
++		if (hl != LAST_ACQUIRED_LOCK())
++			memmove(hl, hl + 1, (LAST_ACQUIRED_LOCK() - hl) * sizeof (*hl));
++
++		curthread->t_nheldlocks--;
++	}
++}
++
+ void
+ lockdep_mutex_exit(kmutex_t *mutex)
+ {
+@@ -561,29 +554,34 @@ lockdep_mutex_exit(kmutex_t *mutex)
+ 	if (!lockdep_enabled)
+ 		goto out;
+ 
+-	for (i = 0; i < curthread->t_nheldlocks; i++) {
++	li = lookup_lock_info(mutex);
++
++	for (i = 0; li && (i < curthread->t_nheldlocks); i++) {
+ 		hl = &curthread->t_heldlocks[i];
+ 
+-		if (hl->hl_info->li_lock != mutex)
++		if (hl->hl_info->li_class != li->li_class)
+ 			continue;
+ 
+ 		/*
+-		 * Found it.  Remove the held lock information.
++		 * Found a class match, but we're not out of the woods yet.
++		 * If the held lock info matches this lock exactly, we're
++		 * set.  If there is a mismatch, then we have to check
++		 * if this could be a recursively locked mutex being
++		 * unlocked.
+ 		 */
+ 
+-		if (hl->hl_recursion > 1) {
+-			/* recursively held lock */
+-			hl->hl_recursion--;
+-		} else {
+-			/* non-recursive, or last reference of a recursive */
+-			if (i != curthread->t_nheldlocks - 1)
+-				memmove(&curthread->t_heldlocks[i],
+-					&curthread->t_heldlocks[i + 1],
+-					curthread->t_nheldlocks - i - 1);
+-
+-			curthread->t_nheldlocks--;
++		if ((hl->hl_info->li_lock != mutex) &&
++		    (hl->hl_policy == LRP_NONE))
++			break;
++
++		if ((hl->hl_policy != LRP_NONE) &&
++		    !check_recursive_policy(li, hl)) {
++			error_mutex_exit_policy_violation(li, hl);
++			goto brick;
+ 		}
+ 
++		do_lockdep_mutex_exit(hl);
++
+ 		goto out;
+ 	}
+ 
+@@ -593,12 +591,12 @@ lockdep_mutex_exit(kmutex_t *mutex)
+ 	 * trying to release it.
+ 	 */
+ 
+-	li = lookup_lock_info(mutex);
+ 	if (!li)
+ 		error_mutex_exit_unknown(mutex);
+ 	else
+ 		error_mutex_exit_notowner(li);
+ 
++brick:
+ #ifndef KEEP_GOING
+ 	lockdep_enabled = 0;
+ #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/6.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,113 @@
+ra-based auto-classification
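+
+A rough sketch of the intended behaviour (assuming caller() returns the
+address mutex_init() was called from; foo_lock is a hypothetical mutex
+initialized without a class name):
+
+	/* hypothetical call site that passes a NULL class name */
+	mutex_init(&foo_lock, NULL, MUTEX_DEFAULT, NULL);
+
+	/*
+	 * lockdep_mutex_init() is handed ra = caller(); with no name to go
+	 * on, it formats the return address as "<%p>" and uses that string
+	 * as the class name, so every mutex initialized from this call site
+	 * lands in the same auto-generated lock class.
+	 */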
+diff --git a/kernel/os/lockdep.c b/kernel/os/lockdep.c
+index 466ddd4..9186cb6 100644
+--- a/kernel/os/lockdep.c
++++ b/kernel/os/lockdep.c
+@@ -43,6 +43,7 @@ struct lock_info {
+ 
+ 	struct lock_class	*li_class;
+ 	kmutex_t		*li_lock;
++	void			*li_ra;
+ 	boolean_t		li_implicit_init;
+ };
+ 
+@@ -333,11 +334,18 @@ lockdep_init()
+ 
+ static struct lock_info *
+ lockdep_mutex_init_unlocked(kmutex_t *mutex, char *name, int type, void *arg,
+-    boolean_t implicit)
++    boolean_t implicit, void *ra)
+ {
++	char *raname[LOCKDEP_CLASS_NAME_LEN];
+ 	struct lock_class *lc;
+ 	struct lock_info *li;
+ 
++	if (!name) {
++		/* if we didn't get a name, use the return address */
++		snprintf(raname, sizeof(raname), "<%p>", ra);
++		name = raname;
++	}
++
+ 	lc = lookup_lock_class(name);
+ 	if (!lc) {
+ 		lc = alloc_lock_class(name);
+@@ -356,6 +364,7 @@ lockdep_mutex_init_unlocked(kmutex_t *mutex, char *name, int type, void *arg,
+ 	if (!li)
+ 		goto err;
+ 
++	li->li_ra = ra;
+ 	li->li_implicit_init = implicit;
+ 
+ 	return (li);
+@@ -368,7 +377,7 @@ err:
+ }
+ 
+ void
+-lockdep_mutex_init(kmutex_t *mutex, char *name, int type, void *arg)
++lockdep_mutex_init(kmutex_t *mutex, char *name, int type, void *arg, void *ra)
+ {
+ 	struct lock_info *li;
+ 
+@@ -380,7 +389,7 @@ lockdep_mutex_init(kmutex_t *mutex, char *name, int type, void *arg)
+ 	if (!lockdep_enabled)
+ 		goto out;
+ 
+-	li = lockdep_mutex_init_unlocked(mutex, name, type, arg, B_FALSE);
++	li = lockdep_mutex_init_unlocked(mutex, name, type, arg, B_FALSE, ra);
+ 	if (li)
+ 		goto out;
+ 
+@@ -417,7 +426,7 @@ check_recursive_policy(struct lock_info *li, struct held_lock *hl)
+ 
+ static void
+ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
+-			   kmutex_rec_pol_t policy)
++			   kmutex_rec_pol_t policy, void *ra)
+ {
+ 	struct lock_class *lc;
+ 	struct lock_info *li;
+@@ -441,7 +450,7 @@ lockdep_mutex_enter_common(kmutex_t *mutex, boolean_t try, boolean_t success,
+ 		error_mutex_enter_unknown(mutex);
+ 
+ 		li = lockdep_mutex_init_unlocked(mutex, UNKNOWN_LOCK_CLASS,
+-		    MUTEX_ADAPTIVE, NULL, B_TRUE);
++		    MUTEX_ADAPTIVE, NULL, B_TRUE, ra);
+ 		if (!li)
+ 			goto brick;
+ 	}
+@@ -647,7 +656,7 @@ extern int do_mutex_tryenter(kmutex_t *);
+ void
+ mutex_init(kmutex_t *mutex, char *name, kmutex_type_t type, void *ibc)
+ {
+-	lockdep_mutex_init(mutex, name, type, ibc);
++	lockdep_mutex_init(mutex, name, type, ibc, caller());
+ 
+ 	do_mutex_init(mutex, name, type, ibc);
+ }
+@@ -663,14 +672,14 @@ mutex_destroy(kmutex_t *mutex)
+ void
+ mutex_enter(kmutex_t *mutex)
+ {
+-	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, LRP_NONE);
++	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, LRP_NONE, caller());
+ 	do_mutex_enter(mutex);
+ }
+ 
+ void
+ mutex_enter_recursive(kmutex_t *mutex, kmutex_rec_pol_t policy)
+ {
+-	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, policy);
++	lockdep_mutex_enter_common(mutex, B_FALSE, B_FALSE, policy, caller());
+ 	do_mutex_enter(mutex);
+ }
+ 
+@@ -681,7 +690,8 @@ mutex_tryenter(kmutex_t *mutex)
+ 
+ 	ret = do_mutex_tryenter(mutex);
+ 
+-	lockdep_mutex_enter_common(mutex, B_TRUE, (boolean_t)ret, LRP_NONE);
++	lockdep_mutex_enter_common(mutex, B_TRUE, (boolean_t)ret, LRP_NONE,
++	    caller());
+ 
+ 	return (ret);
+ }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/patches/7.patch	Sun Mar 22 11:42:54 2020 -0400
@@ -0,0 +1,91 @@
+commit 808a00d5d067022165ba113ab2ee4a9ff9f2e9d0
+Author: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
+Date:   Thu Dec 17 19:21:09 2015 -0500
+
+    patch compile-fixes
+
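+    A summary of the fixes in the hunks below: __recursion_policy_char()
+    returns a plain char, the NULL lock class is dropped (unnamed mutexes
+    now get an RA-derived class name instead), lockdep_lock is a spin lock
+    and so gets an interrupt priority argument (PIL_MAX), and the RA name
+    buffer becomes an array of characters rather than an array of pointers,
+    so snprintf() has a real buffer to format into, e.g.:
+
+	char raname[LOCKDEP_CLASS_NAME_LEN];
+
+	snprintf(raname, sizeof(raname), "<%p>", ra);
+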
+diff --git a/usr/src/uts/common/os/lockdep.c b/usr/src/uts/common/os/lockdep.c
+index 9186cb6..b8bb8da 100644
+--- a/usr/src/uts/common/os/lockdep.c
++++ b/usr/src/uts/common/os/lockdep.c
+@@ -15,8 +15,6 @@
+ 
+ /* we never saw a mutex_init */
+ #define UNKNOWN_LOCK_CLASS		"<unknown>"
+-/* we got a NULL class name in mutex_init */
+-#define NULL_LOCK_CLASS			"<null>"
+ 
+ // XXX: define mutex_enter_recursive in a header
+ 
+@@ -66,7 +64,7 @@ static char *lockdep_msgbuf_ptr;
+ 					lockdep_msgbuf_ptr = lockdep_msgbuf; \
+ 				} while (0)
+ 
+-static const char
++static char
+ __recursion_policy_char(kmutex_rec_pol_t policy)
+ {
+ 	switch (policy) {
+@@ -242,9 +240,6 @@ alloc_lock_class(char *name)
+ 	if (!lc)
+ 		return (NULL);
+ 
+-	if (!name)
+-		name = NULL_LOCK_CLASS;
+-
+ 	(void) strlcpy(lc->lc_name, name, LOCKDEP_CLASS_NAME_LEN);
+ 	lc->lc_ndeps = 0;
+ 
+@@ -292,16 +287,13 @@ lookup_lock_class(char *name)
+ {
+ 	struct lock_class key;
+ 
+-	if (!name)
+-		name = NULL_LOCK_CLASS;
+-
+ 	(void) strlcpy(key.lc_name, name, LOCKDEP_CLASS_NAME_LEN);
+ 
+ 	return (avl_find(&lockclass, &key, NULL));
+ }
+ 
+ void
+-lockdep_init()
++lockdep_init(void)
+ {
+ 	int i;
+ 
+@@ -323,10 +315,10 @@ lockdep_init()
+ 	for (i = 0; i < LOCKDEP_NUM_CLASSES; i++)
+ 		list_insert_head(&lockclass_freelist, &lockclass_slab[i]);
+ 
+-	mutex_init(&lockdep_lock, "lockdep state lock", MUTEX_SPIN, NULL);
++	mutex_init(&lockdep_lock, "lockdep state lock", MUTEX_SPIN,
++	    (void *)PIL_MAX);
+ 
+ 	/* set up the NULL and unknown lock classes */
+-	(void) alloc_lock_class(NULL_LOCK_CLASS);
+ 	(void) alloc_lock_class(UNKNOWN_LOCK_CLASS);
+ 
+ 	lockdep_enabled = 1;
+@@ -336,7 +328,7 @@ static struct lock_info *
+ lockdep_mutex_init_unlocked(kmutex_t *mutex, char *name, int type, void *arg,
+     boolean_t implicit, void *ra)
+ {
+-	char *raname[LOCKDEP_CLASS_NAME_LEN];
++	char raname[LOCKDEP_CLASS_NAME_LEN];
+ 	struct lock_class *lc;
+ 	struct lock_info *li;
+ 
+diff --git a/usr/src/uts/common/sys/lockdep.h b/usr/src/uts/common/sys/lockdep.h
+index 8c5d264..5c13d83 100644
+--- a/usr/src/uts/common/sys/lockdep.h
++++ b/usr/src/uts/common/sys/lockdep.h
+@@ -2,7 +2,7 @@
+ #define	_LOCKDEP_H
+ 
+ #define	LOCKDEP_HELD_STACK_SIZE		16
+-#define LOCKDEP_HELD_STACKTRACE_SIZE	16
++#define	LOCKDEP_HELD_STACKTRACE_SIZE	16
+ 
+ struct lock_info;
+