workqueue: replace for_each_worker_pool() with for_each_std_worker_pool()
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b609bfba134bbe4b4f639ecb04527b00a94de7b2..bd639c185da139bcc3d727a4c525c8c016b829d2 100644
@@ -250,9 +250,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
-#define for_each_worker_pool(pool, gcwq)                               \
-       for ((pool) = &(gcwq)->pools[0];                                \
-            (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++)
+#define for_each_std_worker_pool(pool, cpu)                            \
+       for ((pool) = &get_gcwq((cpu))->pools[0];                       \
+            (pool) < &get_gcwq((cpu))->pools[NR_STD_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, pool)                     \
        hash_for_each(pool->busy_hash, i, pos, worker, hentry)
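The new iterator takes a CPU number instead of a gcwq pointer and resolves the per-CPU gcwq itself via get_gcwq(), so callers no longer need a struct global_cwq * in scope.  A minimal usage sketch (illustrative only; sketch_dump_pools() is not in the kernel):

    static void sketch_dump_pools(int cpu)
    {
            struct worker_pool *pool;

            /* walks the standard pools of @cpu (normal and highpri in this era) */
            for_each_std_worker_pool(pool, cpu)
                    pr_info("cpu %d: pool %p\n", cpu, pool);
    }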
@@ -3500,14 +3500,14 @@ EXPORT_SYMBOL_GPL(work_busy);
 
 static void gcwq_unbind_fn(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_gcwq(smp_processor_id());
+       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
        struct hlist_node *pos;
        int i;
 
-       for_each_worker_pool(pool, gcwq) {
-               BUG_ON(pool->cpu != smp_processor_id());
+       for_each_std_worker_pool(pool, cpu) {
+               BUG_ON(cpu != smp_processor_id());
 
                mutex_lock(&pool->assoc_mutex);
                spin_lock_irq(&pool->lock);
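The unchanged context elided between this hunk and the next (paraphrased from this era's workqueue.c, shown only for orientation, not part of the displayed diff) flags every idle and busy worker WORKER_UNBOUND and marks the pool disassociated before dropping the locks:

    /* paraphrased elided context, not part of the patch */
    list_for_each_entry(worker, &pool->idle_list, entry)
            worker->flags |= WORKER_UNBOUND;

    for_each_busy_worker(worker, i, pos, pool)
            worker->flags |= WORKER_UNBOUND;

    pool->flags |= POOL_DISASSOCIATED;

    spin_unlock_irq(&pool->lock);
    mutex_unlock(&pool->assoc_mutex);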
@@ -3541,15 +3541,15 @@ static void gcwq_unbind_fn(struct work_struct *work)
        /*
         * Sched callbacks are disabled now.  Zap nr_running.  After this,
         * nr_running stays zero and need_more_worker() and keep_working()
-        * are always true as long as the worklist is not empty.  @gcwq now
-        * behaves as unbound (in terms of concurrency management) gcwq
-        * which is served by workers tied to the CPU.
+        * are always true as long as the worklist is not empty.  Pools on
+        * @cpu now behave as unbound (in terms of concurrency management)
+        * pools which are served by workers tied to the CPU.
         *
         * On return from this function, the current worker would trigger
         * unbound chain execution of pending work items if other workers
         * didn't already.
         */
-       for_each_worker_pool(pool, gcwq)
+       for_each_std_worker_pool(pool, cpu)
                atomic_set(get_pool_nr_running(pool), 0);
 }
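The comment above leans on how need_more_worker() and keep_working() are defined earlier in this file.  Paraphrased from this era's source (see the actual file for the exact definitions), both reduce to checks against the pool's nr_running counter, which is why zapping it to zero forces unbound-style behavior:

    /* paraphrase of this era's helpers, not part of the patch */
    static bool __need_more_worker(struct worker_pool *pool)
    {
            return !atomic_read(get_pool_nr_running(pool));
    }

    static bool need_more_worker(struct worker_pool *pool)
    {
            return !list_empty(&pool->worklist) && __need_more_worker(pool);
    }

With nr_running pinned at zero, any non-empty worklist keeps demanding workers, which is exactly the unbound chain execution the comment describes.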
 
@@ -3562,12 +3562,11 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct global_cwq *gcwq = get_gcwq(cpu);
        struct worker_pool *pool;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (pool->nr_workers)
@@ -3585,7 +3584,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        mutex_lock(&pool->assoc_mutex);
                        spin_lock_irq(&pool->lock);
 
@@ -3682,11 +3681,10 @@ void freeze_workqueues_begin(void)
        workqueue_freezing = true;
 
        for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        spin_lock_irq(&pool->lock);
 
                        WARN_ON_ONCE(pool->flags & POOL_FREEZING);
@@ -3772,11 +3770,10 @@ void thaw_workqueues(void)
                goto out_unlock;
 
        for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        spin_lock_irq(&pool->lock);
 
                        WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
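Freeze and thaw are symmetric over the same per-CPU standard pools, which this patch makes explicit by passing the CPU to both loops.  A condensed sketch of the pairing (paraphrased; the pool->lock acquisition shown in the hunks is elided here):

    /* freeze_workqueues_begin(): mark every std pool freezing */
    for_each_std_worker_pool(pool, cpu)
            pool->flags |= POOL_FREEZING;

    /* thaw_workqueues(): expects the flag set, then clears it */
    for_each_std_worker_pool(pool, cpu)
            pool->flags &= ~POOL_FREEZING;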
@@ -3818,11 +3815,10 @@ static int __init init_workqueues(void)
 
        /* initialize gcwqs */
        for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
 
-               for_each_worker_pool(pool, gcwq) {
-                       pool->gcwq = gcwq;
+               for_each_std_worker_pool(pool, cpu) {
+                       pool->gcwq = get_gcwq(cpu);
                        spin_lock_init(&pool->lock);
                        pool->cpu = cpu;
                        pool->flags |= POOL_DISASSOCIATED;
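For reference, at call sites like this the new macro expands to the loop below (taken straight from the definition in the first hunk), which is why the local gcwq variable can go away even though the body still calls get_gcwq(cpu) for the back-pointer:

    for (pool = &get_gcwq(cpu)->pools[0];
         pool < &get_gcwq(cpu)->pools[NR_STD_WORKER_POOLS];
         pool++)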
@@ -3847,10 +3843,9 @@ static int __init init_workqueues(void)
 
        /* create the initial worker */
        for_each_online_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
 
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (cpu != WORK_CPU_UNBOUND)
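The tail of this loop, cut off by the end of the hunk (paraphrased from this era's source, not part of the displayed diff), re-associates pools on real CPUs and then creates and starts the initial worker:

    /* paraphrased continuation of the hunk above */
    if (cpu != WORK_CPU_UNBOUND)
            pool->flags &= ~POOL_DISASSOCIATED;

    worker = create_worker(pool);
    BUG_ON(!worker);
    spin_lock_irq(&pool->lock);
    start_worker(worker);
    spin_unlock_irq(&pool->lock);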