                if (status != 0)
                    err_abort (status, "First lock");
            } else {
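                /*
                 * For every mutex after the first, either try the lock
                 * and back off on contention (backoff != 0), or block
                 * and risk deadlock with the other locker (backoff == 0).
                 */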
                if (backoff)
                    status = pthread_mutex_trylock (&mutex[i]);
                else
                    status = pthread_mutex_lock (&mutex[i]);
                if (status == EBUSY) {
                    backoffs++;
                    DPRINTF ((
                        " [forward locker backing off at %d]\n",
                        i));
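                    /*
                     * Back off: unlock from mutex[i] down to mutex[0],
                     * then let the outer loop (i++) restart the
                     * sequence at the first mutex.
                     */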
                    for (; i >= 0; i--) {
                        status = pthread_mutex_unlock (&mutex[i]);
                        if (status != 0)
                            err_abort (status, "Backoff");
                    }
                } else {
                    if (status != 0)
                        err_abort (status, "Lock mutex");
                    DPRINTF ((" forward locker got %d\n", i));
                }
            }
            /*
             * Yield processor, if needed to be sure locks get
             * interleaved on a uniprocessor.
             */
            if (yield_flag) {
                if (yield_flag > 0)
                    sched_yield ();
                else
                    sleep (1);
            }
        }
        /*
         * Report that we got 'em, and unlock to try again.
         */
        printf (
            "lock forward got all locks, %d backoffs\n", backoffs);
        pthread_mutex_unlock (&mutex[2]);
        pthread_mutex_unlock (&mutex[1]);
        pthread_mutex_unlock (&mutex[0]);
        sched_yield ();
    }
    return NULL;
}

/*
 * This is a thread start routine that locks all mutexes in
 * reverse order, to ensure a conflict with lock_forward, which
 * does the opposite.
 */
void *lock_backward (void *arg)
{
    int i, iterate, backoffs;
    int status;

    for (iterate = 0; iterate < ITERATIONS; iterate++) {
        backoffs = 0;
        for (i = 2; i >= 0; i--) {
            if (i == 2) {
                status = pthread_mutex_lock (&mutex[i]);
                if (status != 0)
                    err_abort (status, "First lock");
            } else {
                if (backoff)
                    status = pthread_mutex_trylock (&mutex[i]);
                else
                    status = pthread_mutex_lock (&mutex[i]);
                if (status == EBUSY) {
                    backoffs++;
                    DPRINTF ((
                        " [backward locker backing off at %d]\n",
                        i));
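                    /*
                     * Back off: unlock from mutex[i] up through mutex[2],
                     * then let the outer loop (i--) restart at the
                     * last mutex.
                     */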
                    for (; i < 3; i++) {
                        status = pthread_mutex_unlock (&mutex[i]);
                        if (status != 0)
                            err_abort (status, "Backoff");
                    }
                } else {
                    if (status != 0)
                        err_abort (status, "Lock mutex");
                    DPRINTF ((" backward locker got %d\n", i));
                }
            }
            /*
             * Yield processor, if needed to be sure locks get
             * interleaved on a uniprocessor.
             */
            if (yield_flag) {
                if (yield_flag > 0)
                    sched_yield ();
                else
                    sleep (1);
            }
        }
        /*
         * Report that we got 'em, and unlock to try again.