author     Tejun Heo <tj@kernel.org>  2013-01-24 11:01:33 -0800
committer  Tejun Heo <tj@kernel.org>  2013-01-24 11:01:33 -0800
commit     715b06b864c99a18cb8368dfb187da4f569788cd
tree       599ab1152a1c93b83d1be05aaeb370cac2e7e3eb /kernel/workqueue.c
parent     35b6bb63b8a288f90e07948867941a553b3d97bc
workqueue: introduce WORK_OFFQ_CPU_NONE

Currently, when a work item is off queue, the high bits of its data field
encode the last CPU it was on. This is scheduled to be changed to a pool ID,
which will make it impossible to use WORK_CPU_NONE to indicate no association.

This patch limits the number of bits used for the off-queue CPU number to 31
(so that the maximum value fits in an int) and uses the highest possible
value, WORK_OFFQ_CPU_NONE, to indicate no association.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
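The constants behind this scheme live in include/linux/workqueue.h, which is
outside the diffstat below. As a rough, standalone sketch of the encoding the
message describes (not the kernel's actual definitions: the OFFQ_* names, the
flag width, and the helper functions are hypothetical stand-ins), the idea
looks roughly like this:

/*
 * Standalone illustration of the off-queue data layout described above.
 * All names and widths here are hypothetical stand-ins; the real
 * constants are defined in include/linux/workqueue.h.
 */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

#define OFFQ_FLAG_BITS	5	/* assumed width of the low flag bits */
#define OFFQ_CPU_SHIFT	OFFQ_FLAG_BITS
#define OFFQ_LEFT	(BITS_PER_LONG - OFFQ_CPU_SHIFT)
/* cap the CPU field at 31 bits so its maximum value fits in an int */
#define OFFQ_CPU_BITS	(OFFQ_LEFT <= 31 ? OFFQ_LEFT : 31)
/* the highest encodable CPU value doubles as "no CPU association" */
#define OFFQ_CPU_NONE	((1UL << OFFQ_CPU_BITS) - 1)

/* pack a CPU number (or OFFQ_CPU_NONE) above the flag bits */
static unsigned long pack_offq_data(unsigned long cpu, unsigned long flags)
{
	return (cpu << OFFQ_CPU_SHIFT) | flags;
}

/* recover the CPU number; -1 means no association */
static int unpack_offq_cpu(unsigned long data)
{
	unsigned long cpu = data >> OFFQ_CPU_SHIFT;

	return cpu == OFFQ_CPU_NONE ? -1 : (int)cpu;
}

int main(void)
{
	unsigned long on_cpu3 = pack_offq_data(3, 0x1);
	unsigned long no_cpu  = pack_offq_data(OFFQ_CPU_NONE, 0x1);

	assert(unpack_offq_cpu(on_cpu3) == 3);
	assert(unpack_offq_cpu(no_cpu) == -1);
	printf("cpu field: %lu bits, NONE sentinel: %#lx\n",
	       (unsigned long)OFFQ_CPU_BITS, OFFQ_CPU_NONE);
	return 0;
}

Capping the CPU field at 31 bits keeps its largest encodable value, the NONE
sentinel, representable in a plain int, which is what the message means by
"the max fits in an int".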
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1a686e481132..405282d30046 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -573,7 +573,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
 			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
 
 	cpu = data >> WORK_OFFQ_CPU_SHIFT;
-	if (cpu == WORK_CPU_NONE)
+	if (cpu == WORK_OFFQ_CPU_NONE)
 		return NULL;
 
 	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
@@ -583,7 +583,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
 static void mark_work_canceling(struct work_struct *work)
 {
 	struct global_cwq *gcwq = get_work_gcwq(work);
-	unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+	unsigned long cpu = gcwq ? gcwq->cpu : WORK_OFFQ_CPU_NONE;
 
 	set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
 		      WORK_STRUCT_PENDING);