/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

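/*
 * Two queueing schemes are used below: single-target calls go on the
 * destination CPU's per-cpu call_single_queue, protected by that queue's
 * own lock; multi-target calls go on the global call_function_queue,
 * which is appended to under call_function_lock and walked locklessly
 * under RCU by the receiving CPUs.
 */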
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

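/*
 * CSD_FLAG_WAIT: the caller spins until the handler clears the flag
 * after running the function. CSD_FLAG_ALLOC: the element was kmalloc'ed
 * and is freed after execution (kfree() for single calls, call_rcu() for
 * the multi-target queue).
 */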
enum {
	CSD_FLAG_WAIT		= 0x01,
	CSD_FLAG_ALLOC		= 0x02,
};

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	cpumask_t cpumask;
	struct rcu_head rcu_head;
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info etc set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may delete
	 * 'pos', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpu_isset(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpu_clear(cpu, data->cpumask);
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other stores to list head for checking whether
	 * list is empty without holding q->lock
	 */
	smp_read_barrier_depends();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
					  list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if
			 * flags == 0 (when called through
			 * generic_exec_single()), so save the flags away
			 * before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}

		/*
		 * See comment on outer loop
		 */
		smp_read_barrier_depends();
	}
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
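
/*
 * Illustrative use of smp_call_function_single() (do_poke and
 * struct poke_stats are made-up names, not kernel APIs):
 *
 *	static void do_poke(void *info)
 *	{
 *		struct poke_stats *s = info;
 *		s->hits++;
 *	}
 *
 *	err = smp_call_function_single(cpu, do_poke, &stats, 1);
 *
 * With wait == 1 the call returns only after do_poke() has run on @cpu,
 * so passing pointers to on-stack data is safe; it must not be used with
 * interrupts disabled.
 */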

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
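
/*
 * Illustrative embedding of a call_single_data element (struct my_work
 * and my_func are made-up names, not kernel APIs):
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		int arg;
 *	};
 *
 *	work->csd.func = my_func;
 *	work->csd.info = work;
 *	work->csd.flags = 0;
 *	__smp_call_function_single(cpu, &work->csd);
 *
 * With flags == 0 the caller neither waits nor gets a kfree() from the
 * handler, so @work must stay valid until my_func() has run on @cpu.
 */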

/* Dummy function used to quiesce the single-call queue, below */
static void quiesce_dummy(void *unused)
{
}

/*
 * Ensure stack based data used in call function mask is safe to free.
 *
 * This is needed by smp_call_function_mask when using on-stack data,
 * because a single call function queue is shared by all CPUs, and any CPU
 * may pick up the data element to execute, and smp_call_function_mask may
 * not be finished with the element by the time it returns: the CPU that
 * drops the last reference and clears CSD_FLAG_WAIT is still inside its
 * RCU walk of call_function_queue and will load ->next from the element
 * afterwards. Running a synchronous dummy call on each CPU in the mask
 * forces those CPUs through another interrupt before we return, so the
 * caller's stack frame can no longer be referenced once it is reused.
 */
static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
{
	struct call_single_data data;
	int cpu;

	data.func = quiesce_dummy;
	data.info = NULL;

	for_each_cpu_mask(cpu, mask) {
		data.flags = CSD_FLAG_WAIT;
		generic_exec_single(cpu, &data);
	}
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	struct call_function_data d;
	struct call_function_data *data = NULL;
	cpumask_t allbutself;
	unsigned long flags;
	int cpu, num_cpus;
	int slowpath = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	cpu = smp_processor_id();
	allbutself = cpu_online_map;
	cpu_clear(cpu, allbutself);
	cpus_and(mask, mask, allbutself);
	num_cpus = cpus_weight(mask);

	/*
	 * If zero CPUs, return. If just a single CPU, turn this request
	 * into a targeted single call instead since it's faster.
	 */
	if (!num_cpus)
		return 0;
	else if (num_cpus == 1) {
		cpu = first_cpu(mask);
		return smp_call_function_single(cpu, func, info, wait);
	}

	data = kmalloc(sizeof(*data), GFP_ATOMIC);
	if (data) {
		data->csd.flags = CSD_FLAG_ALLOC;
		if (wait)
			data->csd.flags |= CSD_FLAG_WAIT;
	} else {
		data = &d;
		data->csd.flags = CSD_FLAG_WAIT;
		wait = 1;
		slowpath = 1;
	}

	spin_lock_init(&data->lock);
	data->csd.func = func;
	data->csd.info = info;
	data->refs = num_cpus;
	data->cpumask = mask;

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi(mask);

	/* Wait for response if we indicated we need to wait */
	if (wait) {
		csd_flag_wait(&data->csd);
		if (unlikely(slowpath))
			smp_call_function_mask_quiesce_stack(mask);
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	preempt_disable();
	ret = smp_call_function_mask(cpu_online_map, func, info, wait);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(smp_call_function);
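
/*
 * The helpers below take and release the global function-call lock;
 * architecture code can use them to hold off multi-target call IPIs,
 * e.g. around marking a CPU online during bringup.
 */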
void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}