workqueue: use new hashtable implementation
Switch workqueues to use the new hashtable implementation. This reduces the
amount of generic unrelated code in the workqueues.

This patch depends on d9b482c ("hashtable: introduce a small and naive
hashtable") which was merged in v3.6.

Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
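
For readers unfamiliar with the header this patch switches to, the sketch below shows the <linux/hashtable.h> API as it looked around v3.7, when the iterators still took an explicit struct hlist_node cursor (the same signatures the diff below uses; later kernels dropped that argument). It is an illustrative, uncompiled sketch, not code from this patch: struct item, struct registry and the registry_*() helpers are invented names, but hash_init(), hash_add(), hash_del() and hash_for_each_possible() are the real calls the patch adopts. The point of the conversion is visible here: the table hashes an integer key itself, so the open-coded busy_worker_head() bucket computation is no longer needed.

#include <linux/hashtable.h>

struct item {
	unsigned long key;		/* value the item is hashed on */
	struct hlist_node hnode;	/* links the item into its bucket */
};

struct registry {
	DECLARE_HASHTABLE(items, 3);	/* 2^3 = 8 buckets; busy_hash uses BUSY_WORKER_HASH_ORDER */
};

static void registry_init(struct registry *reg)
{
	hash_init(reg->items);		/* every bucket becomes an empty hlist */
}

static void registry_add(struct registry *reg, struct item *it)
{
	/* hash_add() hashes the integer key itself; no hand-rolled hash function */
	hash_add(reg->items, &it->hnode, it->key);
}

static struct item *registry_find(struct registry *reg, unsigned long key)
{
	struct item *it;
	struct hlist_node *tmp;

	/* walk only the bucket that 'key' hashes to */
	hash_for_each_possible(reg->items, it, tmp, hnode, key)
		if (it->key == key)
			return it;
	return NULL;
}

static void registry_del(struct item *it)
{
	hash_del(&it->hnode);		/* unhash; the item itself is not freed */
}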
sashalevin authored and htejun committed Dec 18, 2012
1 parent 848b814 commit 42f8570
Showing 1 changed file with 15 additions and 71 deletions.
kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
 #include "workqueue_sched.h"

@@ -82,8 +83,6 @@ enum {
 	NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
 
 	BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
-	BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
-	BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
 
 	MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
@@ -180,7 +179,7 @@ struct global_cwq {
 	unsigned int flags; /* L: GCWQ_* flags */
 
 	/* workers are chained either in busy_hash or pool idle_list */
-	struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
+	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */
 
 	struct worker_pool pools[NR_WORKER_POOLS];
@@ -285,8 +284,7 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, gcwq) \
-	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
-		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+	hash_for_each(gcwq->busy_hash, i, pos, worker, hentry)
 
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 				  unsigned int sw)
@@ -858,63 +856,6 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 	atomic_inc(get_pool_nr_running(pool));
 }
 
-/**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-					   struct work_struct *work)
-{
-	const int base_shift = ilog2(sizeof(struct work_struct));
-	unsigned long v = (unsigned long)work;
-
-	/* simple shift and fold hash, do we need something better? */
-	v >>= base_shift;
-	v += v >> BUSY_WORKER_HASH_ORDER;
-	v &= BUSY_WORKER_HASH_MASK;
-
-	return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq. @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-						   struct hlist_head *bwh,
-						   struct work_struct *work)
-{
-	struct worker *worker;
-	struct hlist_node *tmp;
-
-	hlist_for_each_entry(worker, tmp, bwh, hentry)
-		if (worker->current_work == work)
-			return worker;
-	return NULL;
-}
-
 /**
  * find_worker_executing_work - find worker which is executing a work
  * @gcwq: gcwq of interest
@@ -934,8 +875,14 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 						 struct work_struct *work)
 {
-	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-					    work);
+	struct worker *worker;
+	struct hlist_node *tmp;
+
+	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry, (unsigned long)work)
+		if (worker->current_work == work)
+			return worker;
+
+	return NULL;
 }
 
 /**
@@ -2166,7 +2113,6 @@ __acquires(&gcwq->lock)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
-	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
@@ -2198,15 +2144,15 @@ __acquires(&gcwq->lock)
 	 * already processing the work. If so, defer the work to the
 	 * currently executing one.
 	 */
-	collision = __find_worker_executing_work(gcwq, bwh, work);
+	collision = find_worker_executing_work(gcwq, work);
 	if (unlikely(collision)) {
 		move_linked_works(work, &collision->scheduled, NULL);
 		return;
 	}
 
 	/* claim and dequeue */
 	debug_work_deactivate(work);
-	hlist_add_head(&worker->hentry, bwh);
+	hash_add(gcwq->busy_hash, &worker->hentry, (unsigned long)worker);
 	worker->current_work = work;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
@@ -2264,7 +2210,7 @@ __acquires(&gcwq->lock)
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/* we're done with it, release */
-	hlist_del_init(&worker->hentry);
+	hash_del(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
@@ -3831,7 +3777,6 @@ void thaw_workqueues(void)
 static int __init init_workqueues(void)
 {
 	unsigned int cpu;
-	int i;
 
 	/* make sure we have enough bits for OFFQ CPU number */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
@@ -3849,8 +3794,7 @@ static int __init init_workqueues(void)
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
+		hash_init(gcwq->busy_hash);
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
