comparison synch.c @ 771:01848cbed620

synch: move mutex lock checking into a helper function

Signed-off-by: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
author Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
date Thu, 25 Jul 2019 14:19:31 -0400
parents 8f41dc0ec42e
children f2c96ac96787
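
In short: the per-mutex lockdep bookkeeping that the mutex acquire verifier used to carry inline under JEFFPC_LOCK_TRACKING now lives in a new check_unheld_for_lock() helper, so the acquire path shrinks to a magic check plus one call. A condensed sketch of the resulting shape (the enclosing function's name and signature are inferred from verify_lock_unlock() in the hunks below, not quoted verbatim from the changeset):

/*
 * Sketch only; the signature mirrors verify_lock_unlock() shown in the
 * second hunk, and the body matches the post-change lines of this changeset.
 */
static void verify_lock_lock(const struct lock_context *where, struct lock *l)
{
	if (!l)
		print_invalid_call("MXLOCK", where);

	check_magic(&l->info, "acquire", where, SYNCH_TYPE_MUTEX);
	check_unheld_for_lock(&l->info, where);
}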
comparison
--- 770:8f41dc0ec42e
+++ 771:01848cbed620
@@ -486,10 +486,45 @@
 		__bad_magic(info, op, where, expected_type);
 	else if (info->type != expected_type)
 		__bad_type(info, op, where, expected_type);
 }
 
+static void check_unheld_for_lock(struct lock_info *info,
+				  const struct lock_context *where)
+{
+#ifdef JEFFPC_LOCK_TRACKING
+	struct held_lock *held;
+	size_t i;
+
+	if (!atomic_read(&lockdep_on))
+		return;
+
+	/* check for deadlocks & recursive locking */
+	for_each_held_lock(i, held) {
+		if ((held->info != info) && (held->info->lc != info->lc))
+			continue;
+
+		error_lock(held, info, where);
+		return;
+	}
+
+	/* check for circular dependencies */
+	if (check_circular_deps(info, where))
+		return;
+
+	held = held_stack_alloc();
+	if (!held) {
+		error_alloc(info, where, "lock nesting limit reached");
+		return;
+	}
+
+	held->info = info;
+	held->where = *where;
+	held->type = info->type;
+#endif
+}
+
 static void verify_lock_init(const struct lock_context *where, struct lock *l,
 			     struct lock_class *lc)
 {
 	if (!l || !lc)
 		print_invalid_call("MXINIT", where);
@@ -534,44 +569,11 @@
 {
 	if (!l)
 		print_invalid_call("MXLOCK", where);
 
 	check_magic(&l->info, "acquire", where, SYNCH_TYPE_MUTEX);
-
-#ifdef JEFFPC_LOCK_TRACKING
-	struct held_lock *held;
-	size_t i;
-
-	if (!atomic_read(&lockdep_on))
-		return;
-
-	/* check for deadlocks & recursive locking */
-	for_each_held_lock(i, held) {
-		if ((held->info != &l->info) && (held->info->lc != l->info.lc))
-			continue;
-
-		if (held->info == &l->info)
-			sanity_check_held_synch_type(held, SYNCH_TYPE_MUTEX);
-
-		error_lock(held, &l->info, where);
-		return;
-	}
-
-	/* check for circular dependencies */
-	if (check_circular_deps(&l->info, where))
-		return;
-
-	held = held_stack_alloc();
-	if (!held) {
-		error_alloc(&l->info, where, "lock nesting limit reached");
-		return;
-	}
-
-	held->info = &l->info;
-	held->where = *where;
-	held->type = SYNCH_TYPE_MUTEX;
-#endif
+	check_unheld_for_lock(&l->info, where);
 }
 
 static void verify_lock_unlock(const struct lock_context *where, struct lock *l)
 {
 	if (!l)
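
The helper takes a plain struct lock_info pointer and records held->type from info->type rather than hard-coding SYNCH_TYPE_MUTEX, so other acquire-side verifiers could reuse it. A hypothetical illustration of such reuse, not part of this changeset (the rwlock verifier name, struct rwlock layout, "RWLOCK" operation string, and SYNCH_TYPE_RW constant are all assumed for the example):

/* Hypothetical caller; every identifier not shown in the diff above is assumed. */
static void verify_rw_lock(const struct lock_context *where, struct rwlock *l)
{
	if (!l)
		print_invalid_call("RWLOCK", where);

	check_magic(&l->info, "acquire", where, SYNCH_TYPE_RW);
	check_unheld_for_lock(&l->info, where);
}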