
Commit

convert old cpumask API into new one
Adapt to the new API.

Signed-off-by: KOSAKI Motohiro <[email protected]>
Signed-off-by: Frank Blaschka <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
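
Every hunk below applies the same one-to-one mapping: the old value-based cpumask calls become calls that take a struct cpumask pointer. A minimal sketch of that mapping follows; the names example_mask, example_old_api, example_new_api and tmp are hypothetical illustrations, not taken from this commit.

    /* Illustrative only -- shows the old->new cpumask call mapping used below. */
    static cpumask_t example_mask = { CPU_BITS_NONE };      /* was: = CPU_MASK_NONE */

    static void example_old_api(int cpu)
    {
            cpumask_t tmp;

            cpu_set(cpu, example_mask);                      /* set a cpu's bit       */
            if (!cpu_isset(cpu, example_mask))               /* test a cpu's bit      */
                    return;
            tmp = example_mask;                              /* copy by assignment    */
            cpu_clear(first_cpu(tmp), tmp);                  /* clear lowest set cpu  */
            if (cpus_empty(tmp))                             /* is the mask empty?    */
                    return;
            for_each_cpu_mask_nr(cpu, tmp)                   /* iterate over set cpus */
                    cpu_clear(cpu, example_mask);
    }

    static void example_new_api(int cpu)
    {
            cpumask_t tmp;

            cpumask_set_cpu(cpu, &example_mask);
            if (!cpumask_test_cpu(cpu, &example_mask))
                    return;
            cpumask_copy(&tmp, &example_mask);
            cpumask_clear_cpu(cpumask_first(&tmp), &tmp);
            if (cpumask_empty(&tmp))
                    return;
            for_each_cpu(cpu, &tmp)
                    cpumask_clear_cpu(cpu, &example_mask);
    }

The pointer-based calls are preferred because they avoid passing and copying whole cpumask_t values, which matters when NR_CPUS is large and masks may be allocated off-stack.
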
kosaki authored and davem330 committed May 13, 2011
1 parent 9f6298a commit f201903
Showing 1 changed file with 37 additions and 36 deletions.
73 changes: 37 additions & 36 deletions net/iucv/iucv.c
@@ -128,8 +128,8 @@ struct iucv_irq_list {
 };

 static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
-static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
-static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
+static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
+static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

 /*
  * Queue of interrupt buffers lock for delivery via the tasklet
@@ -406,7 +406,7 @@ static void iucv_allow_cpu(void *data)
         parm->set_mask.ipmask = 0xf8;
         iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
         /* Set indication that iucv interrupts are allowed for this cpu. */
-        cpu_set(cpu, iucv_irq_cpumask);
+        cpumask_set_cpu(cpu, &iucv_irq_cpumask);
 }

 /**
@@ -426,7 +426,7 @@ static void iucv_block_cpu(void *data)
         iucv_call_b2f0(IUCV_SETMASK, parm);

         /* Clear indication that iucv interrupts are allowed for this cpu. */
-        cpu_clear(cpu, iucv_irq_cpumask);
+        cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }

 /**
@@ -451,7 +451,7 @@ static void iucv_block_cpu_almost(void *data)
         iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

         /* Clear indication that iucv interrupts are allowed for this cpu. */
-        cpu_clear(cpu, iucv_irq_cpumask);
+        cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }

 /**
@@ -466,7 +466,7 @@ static void iucv_declare_cpu(void *data)
         union iucv_param *parm;
         int rc;

-        if (cpu_isset(cpu, iucv_buffer_cpumask))
+        if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
                 return;

         /* Declare interrupt buffer. */
@@ -499,9 +499,9 @@
         }

         /* Set indication that an iucv buffer exists for this cpu. */
-        cpu_set(cpu, iucv_buffer_cpumask);
+        cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

-        if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
+        if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
                 /* Enable iucv interrupts on this cpu. */
                 iucv_allow_cpu(NULL);
         else
@@ -520,7 +520,7 @@ static void iucv_retrieve_cpu(void *data)
         int cpu = smp_processor_id();
         union iucv_param *parm;

-        if (!cpu_isset(cpu, iucv_buffer_cpumask))
+        if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
                 return;

         /* Block iucv interrupts. */
@@ -531,7 +531,7 @@ static void iucv_retrieve_cpu(void *data)
         iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

         /* Clear indication that an iucv buffer exists for this cpu. */
-        cpu_clear(cpu, iucv_buffer_cpumask);
+        cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
 }

 /**
@@ -546,8 +546,8 @@ static void iucv_setmask_mp(void)
         get_online_cpus();
         for_each_online_cpu(cpu)
                 /* Enable all cpus with a declared buffer. */
-                if (cpu_isset(cpu, iucv_buffer_cpumask) &&
-                    !cpu_isset(cpu, iucv_irq_cpumask))
+                if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
+                    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
                         smp_call_function_single(cpu, iucv_allow_cpu,
                                                  NULL, 1);
         put_online_cpus();
@@ -564,9 +564,9 @@ static void iucv_setmask_up(void)
         int cpu;

         /* Disable all cpu but the first in cpu_irq_cpumask. */
-        cpumask = iucv_irq_cpumask;
-        cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-        for_each_cpu_mask_nr(cpu, cpumask)
+        cpumask_copy(&cpumask, &iucv_irq_cpumask);
+        cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
+        for_each_cpu(cpu, &cpumask)
                 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }

@@ -593,7 +593,7 @@ static int iucv_enable(void)
         rc = -EIO;
         for_each_online_cpu(cpu)
                 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
-        if (cpus_empty(iucv_buffer_cpumask))
+        if (cpumask_empty(&iucv_buffer_cpumask))
                 /* No cpu could declare an iucv buffer. */
                 goto out;
         put_online_cpus();
@@ -675,15 +675,16 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
         case CPU_DOWN_PREPARE_FROZEN:
                 if (!iucv_path_table)
                         break;
-                cpumask = iucv_buffer_cpumask;
-                cpu_clear(cpu, cpumask);
-                if (cpus_empty(cpumask))
+                cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+                cpumask_clear_cpu(cpu, &cpumask);
+                if (cpumask_empty(&cpumask))
                         /* Can't offline last IUCV enabled cpu. */
                         return notifier_from_errno(-EINVAL);
                 smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
-                if (cpus_empty(iucv_irq_cpumask))
-                        smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-                                                 iucv_allow_cpu, NULL, 1);
+                if (cpumask_empty(&iucv_irq_cpumask))
+                        smp_call_function_single(
+                                cpumask_first(&iucv_buffer_cpumask),
+                                iucv_allow_cpu, NULL, 1);
                 break;
         }
         return NOTIFY_OK;
@@ -866,7 +867,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -915,7 +916,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,

         spin_lock_bh(&iucv_table_lock);
         iucv_cleanup_queue();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -975,7 +976,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1007,7 +1008,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1036,7 +1037,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
         int rc;

         preempt_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1070,7 +1071,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1162,7 +1163,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
         if (msg->flags & IUCV_IPRMDATA)
                 return iucv_message_receive_iprmdata(path, msg, flags,
                                                       buffer, size, residual);
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1235,7 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1274,7 +1275,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1324,7 +1325,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
         union iucv_param *parm;
         int rc;

-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1411,7 +1412,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
         int rc;

         local_bh_disable();
-        if (cpus_empty(iucv_buffer_cpumask)) {
+        if (cpumask_empty(&iucv_buffer_cpumask)) {
                 rc = -EIO;
                 goto out;
         }
@@ -1888,7 +1889,7 @@ static int iucv_pm_freeze(struct device *dev)
         printk(KERN_WARNING "iucv_pm_freeze\n");
 #endif
         if (iucv_pm_state != IUCV_PM_FREEZING) {
-                for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+                for_each_cpu(cpu, &iucv_irq_cpumask)
                         smp_call_function_single(cpu, iucv_block_cpu_almost,
                                                  NULL, 1);
                 cancel_work_sync(&iucv_work);
@@ -1928,7 +1929,7 @@ static int iucv_pm_thaw(struct device *dev)
                 if (rc)
                         goto out;
         }
-        if (cpus_empty(iucv_irq_cpumask)) {
+        if (cpumask_empty(&iucv_irq_cpumask)) {
                 if (iucv_nonsmp_handler)
                         /* enable interrupts on one cpu */
                         iucv_allow_cpu(NULL);
@@ -1961,7 +1962,7 @@ static int iucv_pm_restore(struct device *dev)
                 pr_warning("Suspending Linux did not completely close all IUCV "
                         "connections\n");
         iucv_pm_state = IUCV_PM_RESTORING;
-        if (cpus_empty(iucv_irq_cpumask)) {
+        if (cpumask_empty(&iucv_irq_cpumask)) {
                 rc = iucv_query_maxconn();
                 rc = iucv_enable();
                 if (rc)
