/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <[email protected]> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_WAIT           = 0x02,
};

struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                cfd->csd = alloc_percpu(struct call_single_data);
                if (!cfd->csd) {
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;
#endif
        }

        return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

void __init call_function_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
        while (csd->flags & CSD_FLAG_LOCK)
                cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
        WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_mb();

        csd->flags &= ~CSD_FLAG_LOCK;
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                               smp_call_func_t func, void *info, int wait)
{
        struct call_single_data csd_stack = { .flags = 0 };
        unsigned long flags;

        if (cpu == smp_processor_id()) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        if (!csd) {
                csd = &csd_stack;
                if (!wait)
                        csd = &__get_cpu_var(csd_data);
        }

        csd_lock(csd);

        csd->func = func;
        csd->info = info;

        if (wait)
                csd->flags |= CSD_FLAG_WAIT;

        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
         * normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_lock_wait(csd);

        return 0;
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(smp_processor_id()));

        entry = llist_del_all(&__get_cpu_var(call_single_queue));
        entry = llist_reverse_order(entry);

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                csd->func(csd->info);
                csd_unlock(csd);
        }
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        err = generic_exec_single(cpu, NULL, func, info, wait);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
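
/*
 * Illustrative sketch (hypothetical names): run a read-out function on any
 * CPU of a given mask, preferring the current CPU, then a node-local one.
 *
 *      static void read_local_state(void *info)
 *      {
 *              *(u64 *)info = 42;      // placeholder for real per-node work
 *      }
 *
 *      int sample_near(const struct cpumask *node_cpus, u64 *val)
 *      {
 *              return smp_call_function_any(node_cpus, read_local_state,
 *                                           val, 1);
 *      }
 */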
282
Jens Axboe3d442232008-06-26 11:21:34 +0200283/**
Heiko Carstens27c379f2010-09-10 13:47:29 +0200284 * __smp_call_function_single(): Run a function on a specific CPU
Jens Axboe3d442232008-06-26 11:21:34 +0200285 * @cpu: The CPU to run on.
Jan Kara08eed442014-02-24 16:39:57 +0100286 * @csd: Pre-allocated and setup data structure
Heiko Carstens27c379f2010-09-10 13:47:29 +0200287 * @wait: If true, wait until function has completed on specified CPU.
Jens Axboe3d442232008-06-26 11:21:34 +0200288 *
Ingo Molnar0b13fda2009-02-25 16:52:11 +0100289 * Like smp_call_function_single(), but allow caller to pass in a
290 * pre-allocated data structure. Useful for embedding @data inside
291 * other structures, for instance.
Jens Axboe3d442232008-06-26 11:21:34 +0200292 */
Jan Kara08eed442014-02-24 16:39:57 +0100293int __smp_call_function_single(int cpu, struct call_single_data *csd, int wait)
Jens Axboe3d442232008-06-26 11:21:34 +0200294{
Jan Kara08eed442014-02-24 16:39:57 +0100295 int err = 0;
Frederic Weisbecker8b284992014-02-24 16:39:58 +0100296 int this_cpu;
Jens Axboe3d442232008-06-26 11:21:34 +0200297
Heiko Carstens27c379f2010-09-10 13:47:29 +0200298 this_cpu = get_cpu();
Suresh Siddha269c8612009-08-19 18:05:35 -0700299 /*
300 * Can deadlock when called with interrupts disabled.
301 * We allow cpu's that are not yet online though, as no one else can
302 * send smp call function interrupt to this cpu and as such deadlocks
303 * can't happen.
304 */
Frederic Weisbecker8b284992014-02-24 16:39:58 +0100305 WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
Suresh Siddha269c8612009-08-19 18:05:35 -0700306 && !oops_in_progress);
Peter Zijlstra6e275632009-02-25 13:59:48 +0100307
Frederic Weisbecker8b284992014-02-24 16:39:58 +0100308 err = generic_exec_single(cpu, csd, csd->func, csd->info, wait);
Heiko Carstens27c379f2010-09-10 13:47:29 +0200309 put_cpu();
Frederic Weisbecker8b284992014-02-24 16:39:58 +0100310
Jan Kara08eed442014-02-24 16:39:57 +0100311 return err;
Jens Axboe3d442232008-06-26 11:21:34 +0200312}
Jens Axboee3daab62013-10-25 11:45:35 +0100313EXPORT_SYMBOL_GPL(__smp_call_function_single);
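
/*
 * Illustrative sketch (hypothetical struct and names): embed the csd in a
 * larger object so no allocation is needed on the IPI path. ->func and
 * ->info must be set up before the call, as noted above.
 *
 *      struct my_work {
 *              struct call_single_data csd;
 *              int payload;
 *      };
 *
 *      static void my_work_fn(void *info)
 *      {
 *              struct my_work *w = info;
 *
 *              pr_debug("payload=%d\n", w->payload);
 *      }
 *
 *      void kick_work(struct my_work *w, int cpu)
 *      {
 *              w->csd.func = my_work_fn;
 *              w->csd.info = w;
 *              __smp_call_function_single(cpu, &w->csd, 0);
 *      }
 */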

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = &__get_cpu_var(cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                csd->func = func;
                csd->info = info;
                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
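
/*
 * Illustrative sketch (hypothetical names): flush per-cpu state on a set of
 * CPUs while preemption is disabled, waiting for completion. Note that the
 * calling CPU is skipped by smp_call_function_many() itself.
 *
 *      static void flush_local_cache(void *info)
 *      {
 *              // fast, non-blocking per-cpu flush work
 *      }
 *
 *      void flush_caches(const struct cpumask *cpus)
 *      {
 *              preempt_disable();
 *              smp_call_function_many(cpus, flush_local_cache, NULL, true);
 *              preempt_enable();
 *      }
 */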

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
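
/*
 * Illustrative sketch (hypothetical names): broadcast fast, non-blocking
 * work to every *other* online CPU and wait for it. For the variant that
 * also runs the function on the local CPU, see on_each_cpu() below; for a
 * real in-tree user, see kick_all_cpus_sync() at the end of this file.
 *
 *      static void drain_remote(void *info)
 *      {
 *              // per-cpu drain work
 *      }
 *
 *      void drain_everywhere_else(void)
 *      {
 *              smp_call_function(drain_remote, NULL, 1);
 *      }
 */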

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        smp_announce();
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
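
/*
 * Illustrative sketch (hypothetical names): reset a per-cpu statistics
 * counter on every online CPU, including the local one.
 *
 *      static DEFINE_PER_CPU(unsigned long, evt_count);
 *
 *      static void reset_stats(void *info)
 *      {
 *              __this_cpu_write(evt_count, 0);
 *      }
 *
 *      void reset_all_stats(void)
 *      {
 *              on_each_cpu(reset_stats, NULL, 1);
 *      }
 */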

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
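
/*
 * Illustrative sketch (hypothetical names): poke only the CPUs that share a
 * core/cache domain with @cpu; the local CPU is included if it is in the
 * mask, per the semantics documented above.
 *
 *      static void invalidate_domain(void *info)
 *      {
 *              // per-cpu invalidation work
 *      }
 *
 *      void invalidate_siblings(int cpu)
 *      {
 *              on_each_cpu_mask(topology_core_cpumask(cpu),
 *                               invalidate_domain, NULL, true);
 *      }
 */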

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                                info, wait);
                                /* warn on failure, not on success (ret == 0) */
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
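
/*
 * Illustrative sketch (hypothetical names): only IPI the CPUs whose per-cpu
 * queue is non-empty, skipping idle ones.
 *
 *      static DEFINE_PER_CPU(unsigned int, queued);
 *
 *      static bool cpu_has_work(int cpu, void *info)
 *      {
 *              return per_cpu(queued, cpu) != 0;
 *      }
 *
 *      static void drain_queue(void *info)
 *      {
 *              __this_cpu_write(queued, 0);
 *      }
 *
 *      void drain_busy_cpus(void)
 *      {
 *              on_each_cpu_cond(cpu_has_work, drain_queue, NULL, true,
 *                               GFP_KERNEL);
 *      }
 */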

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
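
/*
 * Illustrative sketch (hypothetical names): publish a new handler pointer
 * and make sure no CPU can still be executing through the old one, in the
 * spirit of the pm_idle update described above.
 *
 *      static void (*active_handler)(void);
 *
 *      void install_handler(void (*new_fn)(void))
 *      {
 *              ACCESS_ONCE(active_handler) = new_fn;
 *              kick_all_cpus_sync();
 *              // the old handler is no longer in use on any CPU
 *      }
 */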