Showing error 1419

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock acquired in the function is not released on every path out of it, so the function can return with the lock still held (the lock is leaked)
File location: kernel/smp.c
Line in file: 422
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2012-05-21 20:30:05 UTC
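
To make the error class concrete, here is a minimal sketch of the flagged
pattern, using a hypothetical lock and function rather than code from this
file: a spinlock is taken, but one return path leaves the function without
releasing it.

#include <linux/errno.h>
#include <linux/spinlock.h>

/* hypothetical lock, for illustration only */
static DEFINE_SPINLOCK(example_lock);

/*
 * Sketch of the "leaving function in locked state" pattern: the early
 * error return exits while example_lock is still held, so the lock is
 * leaked on that path. The fix is to unlock before every return.
 */
static int example_update(int value)
{
        spin_lock(&example_lock);
        if (value < 0)
                return -EINVAL;         /* bug: returns with example_lock held */
        /* ... update state protected by example_lock ... */
        spin_unlock(&example_lock);
        return 0;
}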


Source:

  1/*
  2 * Generic helpers for smp ipi calls
  3 *
  4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  5 *
  6 */
  7#include <linux/init.h>
  8#include <linux/module.h>
  9#include <linux/percpu.h>
 10#include <linux/rcupdate.h>
 11#include <linux/rculist.h>
 12#include <linux/smp.h>
 13
 14static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 15static LIST_HEAD(call_function_queue);
 16__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
 17
 18enum {
 19        CSD_FLAG_WAIT                = 0x01,
 20        CSD_FLAG_ALLOC                = 0x02,
 21};
 22
 23struct call_function_data {
 24        struct call_single_data csd;
 25        spinlock_t lock;
 26        unsigned int refs;
 27        cpumask_t cpumask;
 28        struct rcu_head rcu_head;
 29};
 30
 31struct call_single_queue {
 32        struct list_head list;
 33        spinlock_t lock;
 34};
 35
 36static int __cpuinit init_call_single_data(void)
 37{
 38        int i;
 39
 40        for_each_possible_cpu(i) {
 41                struct call_single_queue *q = &per_cpu(call_single_queue, i);
 42
 43                spin_lock_init(&q->lock);
 44                INIT_LIST_HEAD(&q->list);
 45        }
 46        return 0;
 47}
 48early_initcall(init_call_single_data);
 49
 50static void csd_flag_wait(struct call_single_data *data)
 51{
 52        /* Wait for response */
 53        do {
 54                if (!(data->flags & CSD_FLAG_WAIT))
 55                        break;
 56                cpu_relax();
 57        } while (1);
 58}
 59
 60/*
 61 * Insert a previously allocated call_single_data element for execution
 62 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 63 */
 64static void generic_exec_single(int cpu, struct call_single_data *data)
 65{
 66        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 67        int wait = data->flags & CSD_FLAG_WAIT, ipi;
 68        unsigned long flags;
 69
 70        spin_lock_irqsave(&dst->lock, flags);
 71        ipi = list_empty(&dst->list);
 72        list_add_tail(&data->list, &dst->list);
 73        spin_unlock_irqrestore(&dst->lock, flags);
 74
 75        /*
 76         * Make the list addition visible before sending the ipi.
 77         */
 78        smp_mb();
 79
 80        if (ipi)
 81                arch_send_call_function_single_ipi(cpu);
 82
 83        if (wait)
 84                csd_flag_wait(data);
 85}
 86
 87static void rcu_free_call_data(struct rcu_head *head)
 88{
 89        struct call_function_data *data;
 90
 91        data = container_of(head, struct call_function_data, rcu_head);
 92
 93        kfree(data);
 94}
 95
 96/*
 97 * Invoked by arch to handle an IPI for call function. Must be called with
 98 * interrupts disabled.
 99 */
100void generic_smp_call_function_interrupt(void)
101{
102        struct call_function_data *data;
103        int cpu = get_cpu();
104
105        /*
106         * It's ok to use list_for_each_rcu() here even though we may delete
107         * 'pos', since list_del_rcu() doesn't clear ->next
108         */
109        rcu_read_lock();
110        list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
111                int refs;
112
113                if (!cpu_isset(cpu, data->cpumask))
114                        continue;
115
116                data->csd.func(data->csd.info);
117
118                spin_lock(&data->lock);
119                cpu_clear(cpu, data->cpumask);
120                WARN_ON(data->refs == 0);
121                data->refs--;
122                refs = data->refs;
123                spin_unlock(&data->lock);
124
125                if (refs)
126                        continue;
127
128                spin_lock(&call_function_lock);
129                list_del_rcu(&data->csd.list);
130                spin_unlock(&call_function_lock);
131
132                if (data->csd.flags & CSD_FLAG_WAIT) {
133                        /*
134                         * serialize stores to data with the flag clear
135                         * and wakeup
136                         */
137                        smp_wmb();
138                        data->csd.flags &= ~CSD_FLAG_WAIT;
139                }
140                if (data->csd.flags & CSD_FLAG_ALLOC)
141                        call_rcu(&data->rcu_head, rcu_free_call_data);
142        }
143        rcu_read_unlock();
144
145        put_cpu();
146}
147
148/*
149 * Invoked by arch to handle an IPI for call function single. Must be called
150 * from the arch with interrupts disabled.
151 */
152void generic_smp_call_function_single_interrupt(void)
153{
154        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
155        LIST_HEAD(list);
156
157        /*
158         * Need to see other stores to list head for checking whether
159         * list is empty without holding q->lock
160         */
161        smp_read_barrier_depends();
162        while (!list_empty(&q->list)) {
163                unsigned int data_flags;
164
165                spin_lock(&q->lock);
166                list_replace_init(&q->list, &list);
167                spin_unlock(&q->lock);
168
169                while (!list_empty(&list)) {
170                        struct call_single_data *data;
171
172                        data = list_entry(list.next, struct call_single_data,
173                                                list);
174                        list_del(&data->list);
175
176                        /*
177                         * 'data' can be invalid after this call if
178                         * flags == 0 (when called through
179                         * generic_exec_single()), so save them away before
180                         * making the call.
181                         */
182                        data_flags = data->flags;
183
184                        data->func(data->info);
185
186                        if (data_flags & CSD_FLAG_WAIT) {
187                                smp_wmb();
188                                data->flags &= ~CSD_FLAG_WAIT;
189                        } else if (data_flags & CSD_FLAG_ALLOC)
190                                kfree(data);
191                }
192                /*
193                 * See comment on outer loop
194                 */
195                smp_read_barrier_depends();
196        }
197}
198
199/*
200 * smp_call_function_single - Run a function on a specific CPU
201 * @func: The function to run. This must be fast and non-blocking.
202 * @info: An arbitrary pointer to pass to the function.
203 * @wait: If true, wait until function has completed on other CPUs.
204 *
205 * Returns 0 on success, else a negative status code. Note that @wait
206 * will be implicitly turned on in case of allocation failures, since
207 * we fall back to on-stack allocation.
208 */
209int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
210                             int wait)
211{
212        struct call_single_data d;
213        unsigned long flags;
214        /* prevent preemption and reschedule on another processor,
215           as well as CPU removal */
216        int me = get_cpu();
217        int err = 0;
218
219        /* Can deadlock when called with interrupts disabled */
220        WARN_ON(irqs_disabled());
221
222        if (cpu == me) {
223                local_irq_save(flags);
224                func(info);
225                local_irq_restore(flags);
226        } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
227                struct call_single_data *data = NULL;
228
229                if (!wait) {
230                        data = kmalloc(sizeof(*data), GFP_ATOMIC);
231                        if (data)
232                                data->flags = CSD_FLAG_ALLOC;
233                }
234                if (!data) {
235                        data = &d;
236                        data->flags = CSD_FLAG_WAIT;
237                }
238
239                data->func = func;
240                data->info = info;
241                generic_exec_single(cpu, data);
242        } else {
243                err = -ENXIO;        /* CPU not online */
244        }
245
246        put_cpu();
247        return err;
248}
249EXPORT_SYMBOL(smp_call_function_single);
250
251/**
252 * __smp_call_function_single(): Run a function on another CPU
253 * @cpu: The CPU to run on.
254 * @data: Pre-allocated and setup data structure
255 *
256 * Like smp_call_function_single(), but allow caller to pass in a pre-allocated
257 * data structure. Useful for embedding @data inside other structures, for
258 * instance.
259 *
260 */
261void __smp_call_function_single(int cpu, struct call_single_data *data)
262{
263        /* Can deadlock when called with interrupts disabled */
264        WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
265
266        generic_exec_single(cpu, data);
267}
268
269/* Dummy function */
270static void quiesce_dummy(void *unused)
271{
272}
273
274/*
275 * Ensure stack based data used in call function mask is safe to free.
276 *
277 * This is needed by smp_call_function_mask when using on-stack data, because
278 * a single call function queue is shared by all CPUs, and any CPU may pick up
279 * the data item on the queue at any time before it is deleted. So we need to
280 * ensure that all CPUs have transitioned through a quiescent state after
281 * this call.
282 *
283 * This is a very slow function, implemented by sending synchronous IPIs to
284 * all possible CPUs. For this reason, we have to alloc data rather than use
285 * stack based data even in the case of synchronous calls. The stack based
286 * data is then just used for deadlock/oom fallback which will be very rare.
287 *
288 * If a faster scheme can be made, we could go back to preferring stack based
289 * data -- the data allocation/free is non-zero cost.
290 */
291static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
292{
293        struct call_single_data data;
294        int cpu;
295
296        data.func = quiesce_dummy;
297        data.info = NULL;
298
299        for_each_cpu_mask(cpu, mask) {
300                data.flags = CSD_FLAG_WAIT;
301                generic_exec_single(cpu, &data);
302        }
303}
304
305/**
306 * smp_call_function_mask(): Run a function on a set of other CPUs.
307 * @mask: The set of cpus to run on.
308 * @func: The function to run. This must be fast and non-blocking.
309 * @info: An arbitrary pointer to pass to the function.
310 * @wait: If true, wait (atomically) until function has completed on other CPUs.
311 *
312 * Returns 0 on success, else a negative status code.
313 *
314 * If @wait is true, then returns once @func has returned. Note that @wait
315 * will be implicitly turned on in case of allocation failures, since
316 * we fall back to on-stack allocation.
317 *
318 * You must not call this function with disabled interrupts or from a
319 * hardware interrupt handler or from a bottom half handler. Preemption
320 * must be disabled when calling this function.
321 */
322int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
323                           int wait)
324{
325        struct call_function_data d;
326        struct call_function_data *data = NULL;
327        cpumask_t allbutself;
328        unsigned long flags;
329        int cpu, num_cpus;
330        int slowpath = 0;
331
332        /* Can deadlock when called with interrupts disabled */
333        WARN_ON(irqs_disabled());
334
335        cpu = smp_processor_id();
336        allbutself = cpu_online_map;
337        cpu_clear(cpu, allbutself);
338        cpus_and(mask, mask, allbutself);
339        num_cpus = cpus_weight(mask);
340
341        /*
342         * If zero CPUs, return. If just a single CPU, turn this request
343         * into a targeted single call instead since it's faster.
344         */
345        if (!num_cpus)
346                return 0;
347        else if (num_cpus == 1) {
348                cpu = first_cpu(mask);
349                return smp_call_function_single(cpu, func, info, wait);
350        }
351
352        data = kmalloc(sizeof(*data), GFP_ATOMIC);
353        if (data) {
354                data->csd.flags = CSD_FLAG_ALLOC;
355                if (wait)
356                        data->csd.flags |= CSD_FLAG_WAIT;
357        } else {
358                data = &d;
359                data->csd.flags = CSD_FLAG_WAIT;
360                wait = 1;
361                slowpath = 1;
362        }
363
364        spin_lock_init(&data->lock);
365        data->csd.func = func;
366        data->csd.info = info;
367        data->refs = num_cpus;
368        data->cpumask = mask;
369
370        spin_lock_irqsave(&call_function_lock, flags);
371        list_add_tail_rcu(&data->csd.list, &call_function_queue);
372        spin_unlock_irqrestore(&call_function_lock, flags);
373
374        /*
375         * Make the list addition visible before sending the ipi.
376         */
377        smp_mb();
378
379        /* Send a message to all CPUs in the map */
380        arch_send_call_function_ipi(mask);
381
382        /* optionally wait for the CPUs to complete */
383        if (wait) {
384                csd_flag_wait(&data->csd);
385                if (unlikely(slowpath))
386                        smp_call_function_mask_quiesce_stack(mask);
387        }
388
389        return 0;
390}
391EXPORT_SYMBOL(smp_call_function_mask);
392
393/**
394 * smp_call_function(): Run a function on all other CPUs.
395 * @func: The function to run. This must be fast and non-blocking.
396 * @info: An arbitrary pointer to pass to the function.
397 * @wait: If true, wait (atomically) until function has completed on other CPUs.
398 *
399 * Returns 0 on success, else a negative status code.
400 *
401 * If @wait is true, then returns once @func has returned; otherwise
402 * it returns just before the target cpu calls @func. In case of allocation
403 * failure, @wait will be implicitly turned on.
404 *
405 * You must not call this function with disabled interrupts or from a
406 * hardware interrupt handler or from a bottom half handler.
407 */
408int smp_call_function(void (*func)(void *), void *info, int wait)
409{
410        int ret;
411
412        preempt_disable();
413        ret = smp_call_function_mask(cpu_online_map, func, info, wait);
414        preempt_enable();
415        return ret;
416}
417EXPORT_SYMBOL(smp_call_function);
418
419void ipi_call_lock(void)
420{
421        spin_lock(&call_function_lock);
422}
423
424void ipi_call_unlock(void)
425{
426        spin_unlock(&call_function_lock);
427}
428
429void ipi_call_lock_irq(void)
430{
431        spin_lock_irq(&call_function_lock);
432}
433
434void ipi_call_unlock_irq(void)
435{
436        spin_unlock_irq(&call_function_lock);
437}