# HG changeset patch
# User Josef 'Jeff' Sipek
# Date 1498591976 -10800
# Node ID c611dfebe78a7875ca56871a0447a3283bcae818
# Parent  777783f32bc0e32b9c6ae038f945947ba70f81ce
WIP: kernel: classify locks

diff -r 777783f32bc0 -r c611dfebe78a kernel/os/brand.c
--- a/kernel/os/brand.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/brand.c	Tue Jun 27 22:32:56 2017 +0300
@@ -76,7 +76,7 @@
 void
 brand_init()
 {
-	mutex_init(&brand_list_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&brand_list_lock, "brand list", MUTEX_DEFAULT, NULL);
 	p0.p_brand = &native_brand;
 }
 
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/devcfg.c
--- a/kernel/os/devcfg.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/devcfg.c	Tue Jun 27 22:32:56 2017 +0300
@@ -382,14 +382,15 @@
 	    "i_ddi_alloc_node: name=%s id=%d\n", node_name, devi->devi_nodeid));
 
 	cv_init(&(devi->devi_cv), NULL, CV_DEFAULT, NULL);
-	mutex_init(&(devi->devi_lock), NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&(devi->devi_pm_lock), NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&(devi->devi_pm_busy_lock), NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&devi->devi_lock, "devi", MUTEX_DEFAULT, NULL);
+	mutex_init(&devi->devi_pm_lock, "devi pm", MUTEX_DEFAULT, NULL);
+	mutex_init(&devi->devi_pm_busy_lock, "devi pm busy", MUTEX_DEFAULT,
+	    NULL);
 
 	RIO_TRACE((CE_NOTE, "i_ddi_alloc_node: Initing contract fields: "
 	    "dip=%p, name=%s", (void *)devi, node_name));
 
-	mutex_init(&(devi->devi_ct_lock), NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&devi->devi_ct_lock, "devi ct", MUTEX_DEFAULT, NULL);
 	cv_init(&(devi->devi_ct_cv), NULL, CV_DEFAULT, NULL);
 	devi->devi_ct_count = -1;	/* counter not in use if -1 */
 	list_create(&(devi->devi_ct), sizeof (cont_device_t),
@@ -3282,7 +3283,7 @@
 		logsize = MAX_DEVINFO_LOG_SIZE;
 
 	dh = kmem_alloc(logsize * PAGESIZE, KM_SLEEP);
-	mutex_init(&dh->dh_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&dh->dh_lock, "devi log header", MUTEX_DEFAULT, NULL);
 	dh->dh_max = ((logsize * PAGESIZE) - sizeof (*dh)) /
 	    sizeof (devinfo_audit_t) + 1;
 	dh->dh_curr = -1;
@@ -7505,7 +7506,7 @@
 {
 	struct mt_config_handle *hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);
 
-	mutex_init(&hdl->mtc_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&hdl->mtc_lock, "MT config handle", MUTEX_DEFAULT, NULL);
 	cv_init(&hdl->mtc_cv, NULL, CV_DEFAULT, NULL);
 	hdl->mtc_pdip = pdip;
 	hdl->mtc_fdip = dipp;
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/lgrp.c
--- a/kernel/os/lgrp.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/lgrp.c	Tue Jun 27 22:32:56 2017 +0300
@@ -967,7 +967,7 @@
 {
 	lgrp_stat_t stat;
 
-	mutex_init(&lgrp_kstat_mutex, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&lgrp_kstat_mutex, "lgrp kstat", MUTEX_DEFAULT, NULL);
 
 	for (stat = 0; stat < LGRP_NUM_STATS; stat++)
 		kstat_named_init(&lgrp_kstat_data[stat],
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/logsubr.c
--- a/kernel/os/logsubr.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/logsubr.c	Tue Jun 27 22:32:56 2017 +0300
@@ -139,7 +139,7 @@
 	q->q_nfsrv = q;
 	q->q_lowat = lowat;
 	q->q_hiwat = hiwat;
-	mutex_init(QLOCK(q), NULL, MUTEX_DRIVER, ibc);
+	mutex_init(QLOCK(q), "log queue", MUTEX_DRIVER, ibc);
 
 	return (q);
 }
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/main.c
--- a/kernel/os/main.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/main.c	Tue Jun 27 22:32:56 2017 +0300
@@ -398,6 +398,8 @@
 	mutex_init(&ualock, "ualock", MUTEX_DEFAULT, NULL);
 	mutex_enter(&ualock);
 
+	mutex_init(&cpu_lock, "cpu", MUTEX_DEFAULT, NULL);
+
 	/*
 	 * Setup root lgroup and leaf lgroup for CPU 0
 	 */
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/sched.c
--- a/kernel/os/sched.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/sched.c	Tue Jun 27 22:32:56 2017 +0300
@@ -67,7 +67,7 @@
 	callb_cpr_t cprinfo;
 	kmutex_t swap_cpr_lock;
 
-	mutex_init(&swap_cpr_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&swap_cpr_lock, "swap cpr lock", MUTEX_DEFAULT, NULL);
 	CALLB_CPR_INIT(&cprinfo, &swap_cpr_lock, callb_generic_cpr, "sched");
 
 	for (;;) {
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/task.c
--- a/kernel/os/task.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/task.c	Tue Jun 27 22:32:56 2017 +0300
@@ -1091,7 +1091,7 @@
 void
 task_commit_thread_init()
 {
-	mutex_init(&task_commit_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&task_commit_lock, "task commit", MUTEX_DEFAULT, NULL);
 	cv_init(&task_commit_cv, NULL, CV_DEFAULT, NULL);
 	task_commit_thread = thread_create(NULL, 0, task_commit, NULL, 0, &p0,
 	    TS_RUN, minclsyspri);
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/taskq.c
--- a/kernel/os/taskq.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/taskq.c	Tue Jun 27 22:32:56 2017 +0300
@@ -774,7 +774,7 @@
 
 	bzero(tq, sizeof (taskq_t));
 
-	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&tq->tq_lock, "taskq", MUTEX_DEFAULT, NULL);
 	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
 	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
 	cv_init(&tq->tq_exit_cv, NULL, CV_DEFAULT, NULL);
@@ -1979,8 +1979,8 @@
 
 	/* Initialize each bucket */
 	for (b_id = 0; b_id < bsize; b_id++, bucket++) {
-		mutex_init(&bucket->tqbucket_lock, NULL, MUTEX_DEFAULT,
-		    NULL);
+		mutex_init(&bucket->tqbucket_lock, "taskq bucket",
+		    MUTEX_DEFAULT, NULL);
 		cv_init(&bucket->tqbucket_cv, NULL, CV_DEFAULT, NULL);
 		bucket->tqbucket_taskq = tq;
 		bucket->tqbucket_freelist.tqent_next =
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/timer.c
--- a/kernel/os/timer.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/timer.c	Tue Jun 27 22:32:56 2017 +0300
@@ -552,7 +552,7 @@
 	 */
 	it = kmem_cache_alloc(clock_timer_cache, KM_SLEEP);
 	bzero(it, sizeof (itimer_t));
-	mutex_init(&it->it_mutex, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&it->it_mutex, "itimer", MUTEX_DEFAULT, NULL);
 	sigq = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
 
 	mutex_enter(&p->p_lock);
diff -r 777783f32bc0 -r c611dfebe78a kernel/os/zone.c
--- a/kernel/os/zone.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/os/zone.c	Tue Jun 27 22:32:56 2017 +0300
@@ -252,6 +252,10 @@
 #include
 #include
 
+#define	LOCK_CLASS_ZONE		"zone"
+#define	LOCK_CLASS_ZONE_NLWPS	"zone nlwps"
+#define	LOCK_CLASS_ZONE_MEM	"zone mem"
+
 /*
  * This constant specifies the number of seconds that threads waiting for
  * subsystems to release a zone's general-purpose references will wait before
@@ -2049,8 +2053,8 @@
 void
 zone_zsd_init(void)
 {
-	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&zonehash_lock, "zone hash", MUTEX_DEFAULT, NULL);
+	mutex_init(&zsd_key_lock, "zsd key", MUTEX_DEFAULT, NULL);
 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
 	    offsetof(struct zsd_entry, zsd_linkage));
 	list_create(&zone_active, sizeof (zone_t),
@@ -2058,9 +2062,11 @@
 	list_create(&zone_deathrow, sizeof (zone_t),
 	    offsetof(zone_t, zone_linkage));
 
-	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&zone0.zone_lock, LOCK_CLASS_ZONE, MUTEX_DEFAULT, NULL);
+	mutex_init(&zone0.zone_nlwps_lock, LOCK_CLASS_ZONE_NLWPS,
+	    MUTEX_DEFAULT, NULL);
+	mutex_init(&zone0.zone_mem_lock, LOCK_CLASS_ZONE_MEM, MUTEX_DEFAULT,
+	    NULL);
 	zone0.zone_shares = 1;
 	zone0.zone_nlwps = 0;
 	zone0.zone_nlwps_ctl = INT_MAX;
@@ -4338,9 +4344,11 @@
 	zone->zone_restart_init = B_TRUE;
 	zone->zone_brand = &native_brand;
 	zone->zone_initname = NULL;
-	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&zone->zone_lock, LOCK_CLASS_ZONE, MUTEX_DEFAULT, NULL);
+	mutex_init(&zone->zone_nlwps_lock, LOCK_CLASS_ZONE_NLWPS,
+	    MUTEX_DEFAULT, NULL);
+	mutex_init(&zone->zone_mem_lock, LOCK_CLASS_ZONE_MEM, MUTEX_DEFAULT,
+	    NULL);
 	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
 	list_create(&zone->zone_ref_list, sizeof (zone_ref_t),
 	    offsetof(zone_ref_t, zref_linkage));
diff -r 777783f32bc0 -r c611dfebe78a kernel/vm/seg_vn.c
--- a/kernel/vm/seg_vn.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/vm/seg_vn.c	Tue Jun 27 22:32:56 2017 +0300
@@ -251,7 +251,7 @@
 	struct segvn_data *svd = buf;
 
 	rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
-	mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&svd->segfree_syncmtx, "segvn segfree", MUTEX_DEFAULT, NULL);
 	svd->svn_trnext = svd->svn_trprev = NULL;
 	return (0);
 }
diff -r 777783f32bc0 -r c611dfebe78a kernel/vm/vm_page.c
--- a/kernel/vm/vm_page.c	Tue Jun 27 22:39:06 2017 +0300
+++ b/kernel/vm/vm_page.c	Tue Jun 27 22:32:56 2017 +0300
@@ -340,6 +340,8 @@
 void
 pcf_init(void)
 {
+	unsigned int i;
+
 	if (boot_ncpus != -1) {
 		pcf_fanout = boot_ncpus;
 	} else {
@@ -364,6 +366,13 @@
 		}
 	}
 	pcf_fanout_mask = pcf_fanout - 1;
+
+	/*
+	 * Initialize the pcf mutexes.
+	 */
+	for (i = 0; i < pcf_fanout; i++)
+		mutex_init(&pcf[i].pcf_lock, "page cache/free", MUTEX_DEFAULT,
+		    NULL);
 }
 
 /*
@@ -374,6 +383,9 @@
 {
 	boolean_t callb_vm_cpr(void *, int);
 
+	mutex_init(&freemem_lock, "freemem", MUTEX_DEFAULT, NULL);
+	mutex_init(&new_freemem_lock, "new freemem", MUTEX_DEFAULT, NULL);
+
 	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
 	page_init_mem_config();
 	page_retire_init();
@@ -1239,14 +1251,10 @@
 void
 pcf_acquire_all()
 {
-	struct pcf	*p;
-	uint_t		i;
-
-	p = pcf;
-	for (i = 0; i < pcf_fanout; i++) {
-		mutex_enter(&p->pcf_lock);
-		p++;
-	}
+	uint_t i;
+
+	for (i = 0; i < pcf_fanout; i++)
+		mutex_enter(&pcf[i].pcf_lock);
 }
 
 /*
@@ -1255,14 +1263,10 @@
 void
 pcf_release_all()
 {
-	struct pcf	*p;
-	uint_t		i;
-
-	p = pcf;
-	for (i = 0; i < pcf_fanout; i++) {
-		mutex_exit(&p->pcf_lock);
-		p++;
-	}
+	uint_t i;
+
+	for (i = 0; i < pcf_fanout; i++)
+		mutex_exit(&pcf[i].pcf_lock);
 }
 
 /*
@@ -6791,7 +6795,7 @@
 	pc_thread_shortwait = 23 * hz;
 	pc_thread_longwait = 1201 * hz;
 	pc_thread_retry = 3;
-	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&pc_thread_mutex, "page capture thr", MUTEX_DEFAULT, NULL);
 	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
 	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
 	    TS_RUN, minclsyspri);