Error 825

User: Jiri Slaby
Error type: Resource Leak
Error type description: The code fails to release an acquired resource back to the system for reuse
File location: drivers/dma/dmaengine.c
Line in file: 117
Project: Linux Kernel
Project version: 2.6.28
Tools: Stanse (1.2)
Entered: 2011-11-07 22:40:13 UTC
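
For context, a minimal sketch of the error class reported here. With get_cpu()/put_cpu(), the "resource" is the preemption-disabled, per-CPU context: it must be released on every return path, and a path that skips the release is what a checker such as Stanse reports as a resource leak. The helper names below are hypothetical and not taken from dmaengine.c; only the dma_chan per-CPU fields mirror the listing that follows.

/* Hypothetical illustration of the flagged error class: get_cpu()
 * disables preemption and must be balanced by put_cpu() on every
 * path out of the function.
 */
static int chan_busy_leaky(struct dma_chan *chan)        /* hypothetical */
{
        int cpu = get_cpu();

        if (local_read(&per_cpu_ptr(chan->local, cpu)->refcount) > 0)
                return 1;        /* leak: put_cpu() never runs on this path */

        put_cpu();
        return 0;
}

static int chan_busy_balanced(struct dma_chan *chan)     /* hypothetical */
{
        int cpu = get_cpu();
        int busy = local_read(&per_cpu_ptr(chan->local, cpu)->refcount) > 0;

        put_cpu();               /* released on the single exit path */
        return busy;
}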


Source:

  1/*
  2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms of the GNU General Public License as published by the Free
  6 * Software Foundation; either version 2 of the License, or (at your option)
  7 * any later version.
  8 *
  9 * This program is distributed in the hope that it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc., 59
 16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 17 *
 18 * The full GNU General Public License is included in this distribution in the
 19 * file called COPYING.
 20 */
 21
 22/*
 23 * This code implements the DMA subsystem. It provides a HW-neutral interface
 24 * for other kernel code to use asynchronous memory copy capabilities,
 25 * if present, and allows different HW DMA drivers to register as providing
 26 * this capability.
 27 *
 28 * Due to the fact we are accelerating what is already a relatively fast
 29 * operation, the code goes to great lengths to avoid additional overhead,
 30 * such as locking.
 31 *
 32 * LOCKING:
 33 *
 34 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 35 * Both of these are protected by a mutex, dma_list_mutex.
 36 *
 37 * Each device has a channels list, which runs unlocked but is never modified
 38 * once the device is registered, it's just setup by the driver.
 39 *
 40 * Each client is responsible for keeping track of the channels it uses.  See
 41 * the definition of dma_event_callback in dmaengine.h.
 42 *
 43 * Each device has a kref, which is initialized to 1 when the device is
 44 * registered. A kref_get is done for each device registered.  When the
 45 * device is released, the corresponding kref_put is done in the release
 46 * method. Every time one of the device's channels is allocated to a client,
 47 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 48 * happens. The device's release function does a completion, so
 49 * unregister_device does a remove event, device_unregister, a kref_put
 50 * for the first reference, then waits on the completion for all other
 51 * references to finish.
 52 *
 53 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 54 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 55 * signals that it wants to use a channel, and dma_chan_put is called when
 56 * a channel is removed or a client using it is unregistered.  A client can
 57 * take extra references per outstanding transaction, as is the case with
 58 * the NET DMA client.  The release function does a kref_put on the device.
 59 *        -ChrisL, DanW
 60 */
 61
 62#include <linux/init.h>
 63#include <linux/module.h>
 64#include <linux/mm.h>
 65#include <linux/device.h>
 66#include <linux/dmaengine.h>
 67#include <linux/hardirq.h>
 68#include <linux/spinlock.h>
 69#include <linux/percpu.h>
 70#include <linux/rcupdate.h>
 71#include <linux/mutex.h>
 72#include <linux/jiffies.h>
 73
 74static DEFINE_MUTEX(dma_list_mutex);
 75static LIST_HEAD(dma_device_list);
 76static LIST_HEAD(dma_client_list);
 77
 78/* --- sysfs implementation --- */
 79
 80static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 81{
 82        struct dma_chan *chan = to_dma_chan(dev);
 83        unsigned long count = 0;
 84        int i;
 85
 86        for_each_possible_cpu(i)
 87                count += per_cpu_ptr(chan->local, i)->memcpy_count;
 88
 89        return sprintf(buf, "%lu\n", count);
 90}
 91
 92static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 93                                      char *buf)
 94{
 95        struct dma_chan *chan = to_dma_chan(dev);
 96        unsigned long count = 0;
 97        int i;
 98
 99        for_each_possible_cpu(i)
100                count += per_cpu_ptr(chan->local, i)->bytes_transferred;
101
102        return sprintf(buf, "%lu\n", count);
103}
104
105static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
106{
107        struct dma_chan *chan = to_dma_chan(dev);
108        int in_use = 0;
109
110        if (unlikely(chan->slow_ref) &&
111                atomic_read(&chan->refcount.refcount) > 1)
112                in_use = 1;
113        else {
114                if (local_read(&(per_cpu_ptr(chan->local,
115                        get_cpu())->refcount)) > 0)
116                        in_use = 1;
117                put_cpu();
118        }
119
120        return sprintf(buf, "%d\n", in_use);
121}
122
123static struct device_attribute dma_attrs[] = {
124        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
125        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
126        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
127        __ATTR_NULL
128};
129
130static void dma_async_device_cleanup(struct kref *kref);
131
132static void dma_dev_release(struct device *dev)
133{
134        struct dma_chan *chan = to_dma_chan(dev);
135        kref_put(&chan->device->refcount, dma_async_device_cleanup);
136}
137
138static struct class dma_devclass = {
139        .name                = "dma",
140        .dev_attrs        = dma_attrs,
141        .dev_release        = dma_dev_release,
142};
143
144/* --- client and device registration --- */
145
146#define dma_chan_satisfies_mask(chan, mask) \
147        __dma_chan_satisfies_mask((chan), &(mask))
148static int
149__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
150{
151        dma_cap_mask_t has;
152
153        bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
154                DMA_TX_TYPE_END);
155        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
156}
157
158/**
159 * dma_client_chan_alloc - try to allocate channels to a client
160 * @client: &dma_client
161 *
162 * Called with dma_list_mutex held.
163 */
164static void dma_client_chan_alloc(struct dma_client *client)
165{
166        struct dma_device *device;
167        struct dma_chan *chan;
168        int desc;        /* allocated descriptor count */
169        enum dma_state_client ack;
170
171        /* Find a channel */
172        list_for_each_entry(device, &dma_device_list, global_node) {
173                /* Does the client require a specific DMA controller? */
174                if (client->slave && client->slave->dma_dev
175                                && client->slave->dma_dev != device->dev)
176                        continue;
177
178                list_for_each_entry(chan, &device->channels, device_node) {
179                        if (!dma_chan_satisfies_mask(chan, client->cap_mask))
180                                continue;
181
182                        desc = chan->device->device_alloc_chan_resources(
183                                        chan, client);
184                        if (desc >= 0) {
185                                ack = client->event_callback(client,
186                                                chan,
187                                                DMA_RESOURCE_AVAILABLE);
188
189                                /* we are done once this client rejects
190                                 * an available resource
191                                 */
192                                if (ack == DMA_ACK) {
193                                        dma_chan_get(chan);
194                                        chan->client_count++;
195                                } else if (ack == DMA_NAK)
196                                        return;
197                        }
198                }
199        }
200}
201
202enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
203{
204        enum dma_status status;
205        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
206
207        dma_async_issue_pending(chan);
208        do {
209                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
210                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
211                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
212                        return DMA_ERROR;
213                }
214        } while (status == DMA_IN_PROGRESS);
215
216        return status;
217}
218EXPORT_SYMBOL(dma_sync_wait);
219
220/**
221 * dma_chan_cleanup - release a DMA channel's resources
222 * @kref: kernel reference structure that contains the DMA channel device
223 */
224void dma_chan_cleanup(struct kref *kref)
225{
226        struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
227        chan->device->device_free_chan_resources(chan);
228        kref_put(&chan->device->refcount, dma_async_device_cleanup);
229}
230EXPORT_SYMBOL(dma_chan_cleanup);
231
232static void dma_chan_free_rcu(struct rcu_head *rcu)
233{
234        struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
235        int bias = 0x7FFFFFFF;
236        int i;
237        for_each_possible_cpu(i)
238                bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
239        atomic_sub(bias, &chan->refcount.refcount);
240        kref_put(&chan->refcount, dma_chan_cleanup);
241}
242
243static void dma_chan_release(struct dma_chan *chan)
244{
245        atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
246        chan->slow_ref = 1;
247        call_rcu(&chan->rcu, dma_chan_free_rcu);
248}
249
250/**
251 * dma_chans_notify_available - broadcast available channels to the clients
252 */
253static void dma_clients_notify_available(void)
254{
255        struct dma_client *client;
256
257        mutex_lock(&dma_list_mutex);
258
259        list_for_each_entry(client, &dma_client_list, global_node)
260                dma_client_chan_alloc(client);
261
262        mutex_unlock(&dma_list_mutex);
263}
264
265/**
266 * dma_chans_notify_available - tell the clients that a channel is going away
267 * @chan: channel on its way out
268 */
269static void dma_clients_notify_removed(struct dma_chan *chan)
270{
271        struct dma_client *client;
272        enum dma_state_client ack;
273
274        mutex_lock(&dma_list_mutex);
275
276        list_for_each_entry(client, &dma_client_list, global_node) {
277                ack = client->event_callback(client, chan,
278                                DMA_RESOURCE_REMOVED);
279
280                /* client was holding resources for this channel so
281                 * free it
282                 */
283                if (ack == DMA_ACK) {
284                        dma_chan_put(chan);
285                        chan->client_count--;
286                }
287        }
288
289        mutex_unlock(&dma_list_mutex);
290}
291
292/**
293 * dma_async_client_register - register a &dma_client
294 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
295 */
296void dma_async_client_register(struct dma_client *client)
297{
298        /* validate client data */
299        BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
300                !client->slave);
301
302        mutex_lock(&dma_list_mutex);
303        list_add_tail(&client->global_node, &dma_client_list);
304        mutex_unlock(&dma_list_mutex);
305}
306EXPORT_SYMBOL(dma_async_client_register);
307
308/**
309 * dma_async_client_unregister - unregister a client and free the &dma_client
310 * @client: &dma_client to free
311 *
312 * Force frees any allocated DMA channels, frees the &dma_client memory
313 */
314void dma_async_client_unregister(struct dma_client *client)
315{
316        struct dma_device *device;
317        struct dma_chan *chan;
318        enum dma_state_client ack;
319
320        if (!client)
321                return;
322
323        mutex_lock(&dma_list_mutex);
324        /* free all channels the client is holding */
325        list_for_each_entry(device, &dma_device_list, global_node)
326                list_for_each_entry(chan, &device->channels, device_node) {
327                        ack = client->event_callback(client, chan,
328                                DMA_RESOURCE_REMOVED);
329
330                        if (ack == DMA_ACK) {
331                                dma_chan_put(chan);
332                                chan->client_count--;
333                        }
334                }
335
336        list_del(&client->global_node);
337        mutex_unlock(&dma_list_mutex);
338}
339EXPORT_SYMBOL(dma_async_client_unregister);
340
341/**
342 * dma_async_client_chan_request - send all available channels to the
343 * client that satisfy the capability mask
344 * @client - requester
345 */
346void dma_async_client_chan_request(struct dma_client *client)
347{
348        mutex_lock(&dma_list_mutex);
349        dma_client_chan_alloc(client);
350        mutex_unlock(&dma_list_mutex);
351}
352EXPORT_SYMBOL(dma_async_client_chan_request);
353
354/**
355 * dma_async_device_register - registers DMA devices found
356 * @device: &dma_device
357 */
358int dma_async_device_register(struct dma_device *device)
359{
360        static int id;
361        int chancnt = 0, rc;
362        struct dma_chan* chan;
363
364        if (!device)
365                return -ENODEV;
366
367        /* validate device routines */
368        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
369                !device->device_prep_dma_memcpy);
370        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
371                !device->device_prep_dma_xor);
372        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
373                !device->device_prep_dma_zero_sum);
374        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
375                !device->device_prep_dma_memset);
376        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
377                !device->device_prep_dma_interrupt);
378        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
379                !device->device_prep_slave_sg);
380        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
381                !device->device_terminate_all);
382
383        BUG_ON(!device->device_alloc_chan_resources);
384        BUG_ON(!device->device_free_chan_resources);
385        BUG_ON(!device->device_is_tx_complete);
386        BUG_ON(!device->device_issue_pending);
387        BUG_ON(!device->dev);
388
389        init_completion(&device->done);
390        kref_init(&device->refcount);
391
392        mutex_lock(&dma_list_mutex);
393        device->dev_id = id++;
394        mutex_unlock(&dma_list_mutex);
395
396        /* represent channels in sysfs. Probably want devs too */
397        list_for_each_entry(chan, &device->channels, device_node) {
398                chan->local = alloc_percpu(typeof(*chan->local));
399                if (chan->local == NULL)
400                        continue;
401
402                chan->chan_id = chancnt++;
403                chan->dev.class = &dma_devclass;
404                chan->dev.parent = device->dev;
405                dev_set_name(&chan->dev, "dma%dchan%d",
406                             device->dev_id, chan->chan_id);
407
408                rc = device_register(&chan->dev);
409                if (rc) {
410                        chancnt--;
411                        free_percpu(chan->local);
412                        chan->local = NULL;
413                        goto err_out;
414                }
415
416                /* One for the channel, one of the class device */
417                kref_get(&device->refcount);
418                kref_get(&device->refcount);
419                kref_init(&chan->refcount);
420                chan->client_count = 0;
421                chan->slow_ref = 0;
422                INIT_RCU_HEAD(&chan->rcu);
423        }
424
425        mutex_lock(&dma_list_mutex);
426        list_add_tail(&device->global_node, &dma_device_list);
427        mutex_unlock(&dma_list_mutex);
428
429        dma_clients_notify_available();
430
431        return 0;
432
433err_out:
434        list_for_each_entry(chan, &device->channels, device_node) {
435                if (chan->local == NULL)
436                        continue;
437                kref_put(&device->refcount, dma_async_device_cleanup);
438                device_unregister(&chan->dev);
439                chancnt--;
440                free_percpu(chan->local);
441        }
442        return rc;
443}
444EXPORT_SYMBOL(dma_async_device_register);
445
446/**
447 * dma_async_device_cleanup - function called when all references are released
448 * @kref: kernel reference object
449 */
450static void dma_async_device_cleanup(struct kref *kref)
451{
452        struct dma_device *device;
453
454        device = container_of(kref, struct dma_device, refcount);
455        complete(&device->done);
456}
457
458/**
459 * dma_async_device_unregister - unregisters DMA devices
460 * @device: &dma_device
461 */
462void dma_async_device_unregister(struct dma_device *device)
463{
464        struct dma_chan *chan;
465
466        mutex_lock(&dma_list_mutex);
467        list_del(&device->global_node);
468        mutex_unlock(&dma_list_mutex);
469
470        list_for_each_entry(chan, &device->channels, device_node) {
471                dma_clients_notify_removed(chan);
472                device_unregister(&chan->dev);
473                dma_chan_release(chan);
474        }
475
476        kref_put(&device->refcount, dma_async_device_cleanup);
477        wait_for_completion(&device->done);
478}
479EXPORT_SYMBOL(dma_async_device_unregister);
480
481/**
482 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
483 * @chan: DMA channel to offload copy to
484 * @dest: destination address (virtual)
485 * @src: source address (virtual)
486 * @len: length
487 *
488 * Both @dest and @src must be mappable to a bus address according to the
489 * DMA mapping API rules for streaming mappings.
490 * Both @dest and @src must stay memory resident (kernel memory or locked
491 * user space pages).
492 */
493dma_cookie_t
494dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
495                        void *src, size_t len)
496{
497        struct dma_device *dev = chan->device;
498        struct dma_async_tx_descriptor *tx;
499        dma_addr_t dma_dest, dma_src;
500        dma_cookie_t cookie;
501        int cpu;
502
503        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
504        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
505        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
506                                         DMA_CTRL_ACK);
507
508        if (!tx) {
509                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
510                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
511                return -ENOMEM;
512        }
513
514        tx->callback = NULL;
515        cookie = tx->tx_submit(tx);
516
517        cpu = get_cpu();
518        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
519        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
520        put_cpu();
521
522        return cookie;
523}
524EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
525
526/**
527 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
528 * @chan: DMA channel to offload copy to
529 * @page: destination page
530 * @offset: offset in page to copy to
531 * @kdata: source address (virtual)
532 * @len: length
533 *
534 * Both @page/@offset and @kdata must be mappable to a bus address according
535 * to the DMA mapping API rules for streaming mappings.
536 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
537 * locked user space pages)
538 */
539dma_cookie_t
540dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
541                        unsigned int offset, void *kdata, size_t len)
542{
543        struct dma_device *dev = chan->device;
544        struct dma_async_tx_descriptor *tx;
545        dma_addr_t dma_dest, dma_src;
546        dma_cookie_t cookie;
547        int cpu;
548
549        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
550        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
551        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
552                                         DMA_CTRL_ACK);
553
554        if (!tx) {
555                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
556                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
557                return -ENOMEM;
558        }
559
560        tx->callback = NULL;
561        cookie = tx->tx_submit(tx);
562
563        cpu = get_cpu();
564        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
565        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
566        put_cpu();
567
568        return cookie;
569}
570EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
571
572/**
573 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
574 * @chan: DMA channel to offload copy to
575 * @dest_pg: destination page
576 * @dest_off: offset in page to copy to
577 * @src_pg: source page
578 * @src_off: offset in page to copy from
579 * @len: length
580 *
581 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
582 * address according to the DMA mapping API rules for streaming mappings.
583 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
584 * (kernel memory or locked user space pages).
585 */
586dma_cookie_t
587dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
588        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
589        size_t len)
590{
591        struct dma_device *dev = chan->device;
592        struct dma_async_tx_descriptor *tx;
593        dma_addr_t dma_dest, dma_src;
594        dma_cookie_t cookie;
595        int cpu;
596
597        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
598        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
599                                DMA_FROM_DEVICE);
600        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
601                                         DMA_CTRL_ACK);
602
603        if (!tx) {
604                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
605                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
606                return -ENOMEM;
607        }
608
609        tx->callback = NULL;
610        cookie = tx->tx_submit(tx);
611
612        cpu = get_cpu();
613        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
614        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
615        put_cpu();
616
617        return cookie;
618}
619EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
620
621void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
622        struct dma_chan *chan)
623{
624        tx->chan = chan;
625        spin_lock_init(&tx->lock);
626}
627EXPORT_SYMBOL(dma_async_tx_descriptor_init);
628
629static int __init dma_bus_init(void)
630{
631        mutex_init(&dma_list_mutex);
632        return class_register(&dma_devclass);
633}
634subsys_initcall(dma_bus_init);
635
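
Note on the flagged location: put_cpu() at line 117 pairs with the get_cpu() buried inside the local_read() expression on lines 114-115, and it runs on both outcomes of the inner if, so the report may be a false positive triggered by the nested call. Below is a sketch of show_in_use() with the acquire/release pairing made explicit; behaviour is intended to be identical, and this is an illustration, not a reviewed patch.

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        int in_use = 0;

        if (unlikely(chan->slow_ref) &&
            atomic_read(&chan->refcount.refcount) > 1) {
                in_use = 1;
        } else {
                int cpu = get_cpu();        /* acquire per-CPU context */

                if (local_read(&per_cpu_ptr(chan->local, cpu)->refcount) > 0)
                        in_use = 1;
                put_cpu();                  /* released on the only exit */
        }

        return sprintf(buf, "%d\n", in_use);
}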