3 */
4 int workq_add (workq_t *wq, void *element)
5 {
6 workq_ele_t *item;
7 pthread_t id;
8 int status;
9
10 if (wq->valid != WORKQ_VALID)
11 return EINVAL;
12
13 /*
14 * Create and initialize a request structure.
15 */
16 item = (workq_ele_t *)malloc (sizeof (workq_ele_t));
17 if (item == NULL)
18 return ENOMEM;
19 item->data = element;
20 item->next = NULL;
21 status = pthread_mutex_lock (&wq->mutex);
22 if (status != 0) {
23 free (item);
24 return status;
25 }
26
27 /*
28 * Add the request to the end of the queue, updating the
29 * first and last pointers.
30 */
31 if (wq->first == NULL)
32 wq->first = item;
33 else
34 wq->last->next = item;
35 wq->last = item;
36
37 /*
38 * If any threads are idling, wake one.
39 */
40 if (wq->idle > 0) {
41 status = pthread_cond_signal (&wq->cv);
42 if (status != 0) {
43 pthread_mutex_unlock (&wq->mutex);
44 return status;
45 }
46 } else if (wq->counter < wq->parallelism) {
47 /*
48 * If there were no idling threads, and we're allowed to
49 * create a new thread, do so.
50 */
51 DPRINTF (("Creating new worker\n"));
52 status = pthread_create (
53 &id, &wq->attr, workq_server, (void*)wq);
54 if (status != 0) {
55 pthread_mutex_unlock (&wq->mutex);
56 return status;
57 }
58 wq->counter++;
59 }
60 pthread_mutex_unlock (&wq->mutex);
61 return 0;
62 }
That takes care of all the external interfaces, but we will need one more function, the start function for the engine threads. The function, shown in part 4, is called workq_server. Although we could start a thread running the caller's engine with the appropriate argument for each request, this is more efficient. The workq_server function will dequeue the next request and pass it to the engine function, then look for new work. It will wait if necessary and shut down only when a certain period of time passes without any new work appearing, or when told to shut down by workq_destroy.
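To see how those external interfaces fit together from the caller's side, here is a minimal usage sketch. It is not one of the book's listings: the workq_init parameter list (queue, maximum number of engine threads, engine function) is assumed from earlier in the example, and the engine function report_engine and the integer payload are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include "workq.h"      /* assumed header declaring workq_t and the interfaces */

    /*
     * Hypothetical engine function: the work queue hands it the
     * pointer that was passed to workq_add, one element at a time.
     */
    static void report_engine (void *arg)
    {
        int *value = (int *)arg;

        printf ("processing %d\n", *value);
        free (value);
    }

    int main (void)
    {
        workq_t queue;
        int i, status;

        /* Assumed signature: queue, maximum engine threads, engine. */
        status = workq_init (&queue, 4, report_engine);
        if (status != 0)
            abort ();

        for (i = 0; i < 10; i++) {
            int *value = (int *)malloc (sizeof (int));

            if (value == NULL)
                abort ();
            *value = i;
            status = workq_add (&queue, value);
            if (status != 0)
                abort ();
        }

        /*
         * As the discussion of part 4 below explains, queued work is
         * processed before the engine threads terminate, so
         * workq_destroy drains the queue.
         */
        status = workq_destroy (&queue);
        return status;
    }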
Notice that the server begins by locking the work queue mutex, and the 'matching' unlock does not occur until the engine thread is ready to terminate. Despite this, the thread spends most of its life with the mutex unlocked, either waiting for work in the condition variable wait or within the caller's engine function.
29-62 When a thread completes the condition wait loop, either there is work to be done or the work queue is shutting down (wq->quit is nonzero).
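In code, that wait loop might take roughly the following shape. This is only an illustration of the predicate just described, not the part 4 listing itself; it assumes the local variables status, timedout, and an absolute timeout already set up in workq_server, and it maintains the idle count that workq_add consults when deciding whether to signal an existing thread or create a new one.

    /*
     * Sketch: wait until work arrives, the queue is shut down,
     * or the timed wait expires.
     */
    timedout = 0;
    wq->idle++;
    while (wq->first == NULL && !wq->quit) {
        status = pthread_cond_timedwait (&wq->cv, &wq->mutex, &timeout);
        if (status == ETIMEDOUT) {
            timedout = 1;
            break;
        } else if (status != 0) {
            /*
             * A serious error; give up, releasing the mutex before
             * the thread terminates.
             */
            wq->idle--;
            wq->counter--;
            pthread_mutex_unlock (&wq->mutex);
            return NULL;
        }
    }
    wq->idle--;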
67-80 First, we check for work and process the work queue element if there is one. There could still be work queued when workq_destroy is called, and it must all be processed before any engine thread terminates.
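A dequeue step consistent with that rule might look like the fragment below, still executed with the queue mutex held; we is the element pointer declared at the top of workq_server. Again, a sketch rather than the book's code.

    /*
     * Sketch: take the element at the head of the queue, keeping
     * the first and last pointers consistent.  This happens even
     * when wq->quit is set, so no queued work is ever dropped.
     */
    if (wq->first != NULL) {
        we = wq->first;
        wq->first = we->next;
        if (wq->last == we)
            wq->last = NULL;
        /* The element is processed below, with the mutex unlocked. */
    }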
The user's engine function is called with the mutex unlocked, so that the user's engine can run a long time, or block, without affecting the execution of other engine threads. That does not necessarily mean that engine functions can run in parallel — the caller-supplied engine function is responsible for ensuring whatever synchronization is needed to allow the desired level of concurrency or parallelism. Ideal engine functions would require little or no synchronization and would run in parallel.
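Concretely, "with the mutex unlocked" amounts to a bracket like the one below around the engine call. The member name wq->engine for the stored engine function is an assumption here, since the structure definition is not repeated in this excerpt; the element itself, allocated in workq_add, is freed once the engine returns.

    /*
     * Sketch: run the caller's engine without holding the queue
     * mutex, so other engine threads and workq_add are not blocked
     * while the work is performed.
     */
    pthread_mutex_unlock (&wq->mutex);
    wq->engine (we->data);              /* assumed member holding the engine */
    free (we);
    status = pthread_mutex_lock (&wq->mutex);
    if (status != 0)
        abort ();                       /* sketch only; real code would recover */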
86-104 When there is no more work and the queue is being shut down, the thread terminates, awakening workq_destroy if this was the last engine thread to shut down.
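That shutdown handshake could be written along the lines below. It assumes workq_destroy waits on the same condition variable, wq->cv, while wq->counter is nonzero; because one condition variable then carries two predicates, the last thread broadcasts rather than signals so the right waiter is sure to wake. A sketch, not the listing.

    /*
     * Sketch: no work left and the queue is being destroyed.
     * Drop the server count; if this was the last engine thread,
     * wake workq_destroy (assumed to be waiting on wq->cv).
     */
    if (wq->first == NULL && wq->quit) {
        wq->counter--;
        if (wq->counter == 0)
            pthread_cond_broadcast (&wq->cv);
        pthread_mutex_unlock (&wq->mutex);
        return NULL;
    }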
110-114 Finally, we check whether the engine thread timed out looking for work, which would mean the engine has waited long enough. If there's still no work to be found, the engine thread exits.
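The timeout case reads much the same way: if the timed wait expired and nothing has been queued in the meantime, the thread removes itself from the server count and exits; workq_add will simply create a new engine thread later if more requests arrive. One more sketch, under the same assumptions as above.

    /*
     * Sketch: the timed wait expired and no new work appeared,
     * so this engine thread retires.
     */
    if (wq->first == NULL && timedout) {
        wq->counter--;
        pthread_mutex_unlock (&wq->mutex);
        return NULL;
    }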
workq.c part 4 workq_server
1 /*
2 * Thread start routine to serve the work queue.
3 */
4 static void *workq_server (void *arg)
5 {
6 struct timespec timeout;
7 workq_t *wq = (workq_t *)arg;
8 workq_ele_t *we;
9 int status, timedout;
10
11 /*
12 * We don't need to validate the workq_t here... we don't
13 * create server threads until requests are queued (the
14 * queue has been initialized by then!) and we wait for all
15 * server threads to terminate before destroying a work