9017 Introduce taskq_empty()
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>


   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  28  * Copyright (c) 2017 by Delphix. All rights reserved.
  29  * Copyright 2018, Joyent, Inc.
  30  */
  31 
  32 /*
  33  * Kernel task queues: general-purpose asynchronous task scheduling.
  34  *
  35  * A common problem in kernel programming is the need to schedule tasks
  36  * to be performed later, by another thread. There are several reasons
  37  * you may want or need to do this:
  38  *
  39  * (1) The task isn't time-critical, but your current code path is.
  40  *
  41  * (2) The task may require grabbing locks that you already hold.
  42  *
  43  * (3) The task may need to block (e.g. to wait for memory), but you
  44  *     cannot block in your current context.
  45  *
  46  * (4) Your code path can't complete because of some condition, but you can't
  47  *     sleep or fail, so you queue the task for later execution when the
  48  *     condition disappears.
  49  *


 184  *              dynamic task queues if TQ_NOQUEUE is also specified, otherwise
 185  *              always succeed.
 186  *
 187  *        TQ_FRONT:   Puts the new task at the front of the queue.  Be careful.
 188  *
 189  *      NOTE: Dynamic task queues are much more likely to fail in
 190  *              taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 191  *              is important to have backup strategies handling such failures.
 192  *
 193  * void taskq_dispatch_ent(tq, func, arg, flags, tqent)
 194  *
 195  *      This is a light-weight form of taskq_dispatch() that uses a
 196  *      preallocated taskq_ent_t structure for scheduling.  As a
 197  *      result, it does not perform allocations and cannot ever fail.
 198  *      Note especially that it cannot be used with TASKQ_DYNAMIC
 199  *      taskqs.  The memory for the tqent must not be modified or used
 200  *      until the function (func) is called.  (However, func itself
 201  *      may safely modify or free this memory, once it is called.)
 202  *      Note that the taskq framework will NOT free this memory.
 203  *
 204  * boolean_t taskq_empty(tq)
 205  *
 206  *      Returns B_TRUE if there are no pending or running tasks, else B_FALSE.
 207  *
 208  * void taskq_wait(tq):
 209  *
 210  *      Waits for all previously scheduled tasks to complete.
 211  *
 212  *      NOTE: It does not stop any new task dispatches.
 213  *            Do NOT call taskq_wait() from a task: it will cause deadlock.
 214  *
 215  * void taskq_suspend(tq)
 216  *
 217  *      Suspend all task execution. Tasks already scheduled for a dynamic task
 218  *      queue will still be executed, but all newly scheduled tasks will be
 219  *      suspended until taskq_resume() is called.
 220  *
 221  * int  taskq_suspended(tq)
 222  *
 223  *      Returns 1 if taskq is suspended and 0 otherwise. It is intended to
 224  *      ASSERT that the task queue is suspended.
 225  *
 226  * void taskq_resume(tq)
 227  *
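
A minimal sketch of the backup-strategy pattern that the NOTE above recommends; it is not part of this change, and the taskq pointer, task function, and argument names are invented for illustration (taskq_dispatch() returns 0 when it cannot dispatch the task):

#include <sys/taskq.h>

static void
my_task_func(void *arg)
{
        /* ... the deferred work ... */
}

static void
my_submit(taskq_t *my_tq, void *my_arg)
{
        /*
         * Try a non-blocking dispatch first; dynamic task queues in
         * particular may fail here, so fall back to doing the work
         * inline when taskq_dispatch() returns 0.
         */
        if (taskq_dispatch(my_tq, my_task_func, my_arg, TQ_NOSLEEP) == 0)
                my_task_func(my_arg);
}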


1307         ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
1308 
1309         /*
1310          * Mark it as a prealloc'd task.  This is important
1311          * to ensure that we don't free it later.
1312          */
1313         tqe->tqent_un.tqent_flags |= TQENT_FLAG_PREALLOC;
1314         /*
1315          * Enqueue the task to the underlying queue.
1316          */
1317         mutex_enter(&tq->tq_lock);
1318 
1319         if (flags & TQ_FRONT) {
1320                 TQ_ENQUEUE_FRONT(tq, tqe, func, arg);
1321         } else {
1322                 TQ_ENQUEUE(tq, tqe, func, arg);
1323         }
1324         mutex_exit(&tq->tq_lock);
1325 }
1326 
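
A rough illustration of the preallocated-entry pattern described in the comment block earlier; it is not part of this change, the job structure, field, and function names are hypothetical, and zero-initializing the embedded entry is an assumption based on the |= of tqent_flags above:

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/taskq.h>

/*
 * Hypothetical job with an embedded taskq_ent_t: dispatching it never
 * allocates and never fails.  The entry must not be touched between
 * dispatch and the task function running, and the target taskq must
 * not be TASKQ_DYNAMIC.
 */
typedef struct my_job {
        taskq_ent_t     mj_tqent;
        int             mj_work;
} my_job_t;

static void
my_job_func(void *arg)
{
        my_job_t *job = arg;

        /* ... do the work; the job (and its tqent) may be freed here ... */
        kmem_free(job, sizeof (*job));
}

static void
my_job_submit(taskq_t *tq, int work)
{
        /* kmem_zalloc() leaves the embedded tqent zeroed before first use. */
        my_job_t *job = kmem_zalloc(sizeof (*job), KM_SLEEP);

        job->mj_work = work;
        taskq_dispatch_ent(tq, my_job_func, job, 0, &job->mj_tqent);
}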
1327 /*
1328  * Allow our caller to ask if there are tasks pending on the queue.
1329  */
1330 boolean_t
1331 taskq_empty(taskq_t *tq)
1332 {
1333         boolean_t rv;
1334 
1335         ASSERT3P(tq, !=, curthread->t_taskq);
1336         mutex_enter(&tq->tq_lock);
1337         rv = (tq->tq_task.tqent_next == &tq->tq_task) && (tq->tq_active == 0);
1338         mutex_exit(&tq->tq_lock);
1339 
1340         return (rv);
1341 }
1342 
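
A small sketch of how the new taskq_empty() might be used at teardown; the caller and queue pointer are hypothetical, and it assumes nothing else is still dispatching to the queue:

        /*
         * After taskq_wait() returns, and provided no other thread is
         * still dispatching to my_tq, the queue has no pending or
         * running tasks; taskq_empty() lets us ASSERT that before
         * destroying it.
         */
        taskq_wait(my_tq);
        ASSERT(taskq_empty(my_tq));
        taskq_destroy(my_tq);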
1343 /*
1344  * Wait for all pending tasks to complete.
1345  * Calling taskq_wait from a task will cause deadlock.
1346  */
1347 void
1348 taskq_wait(taskq_t *tq)
1349 {
1350         ASSERT(tq != curthread->t_taskq);
1351 
1352         mutex_enter(&tq->tq_lock);
1353         while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
1354                 cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
1355         mutex_exit(&tq->tq_lock);
1356 
1357         if (tq->tq_flags & TASKQ_DYNAMIC) {
1358                 taskq_bucket_t *b = tq->tq_buckets;
1359                 int bid = 0;
1360                 for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
1361                         mutex_enter(&b->tqbucket_lock);
1362                         while (b->tqbucket_nalloc > 0)