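/*
 * Core DMA engine support: a hardware-neutral interface for offloading
 * memory-to-memory copies to DMA engines.
 *
 * DMA drivers register a struct dma_device (and its channels) with
 * dma_async_device_register().  Clients register a struct dma_client,
 * carrying a capability mask and an event callback, with
 * dma_async_client_register() and then ask for channels with
 * dma_async_client_chan_request().  Channel lifetime is tracked with a
 * per-cpu reference count that is folded into the channel's kref when
 * the channel is released.
 */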
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	int in_use = 0;

	/* slow_ref is set once channel teardown has begun and the
	 * outstanding references have been folded into the kref;
	 * until then the per-cpu reference counts are authoritative.
	 */
	if (unlikely(chan->slow_ref) &&
	    atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name = "dma",
	.dev_attrs = dma_attrs,
	.dev_release = dma_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

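/**
 * dma_client_chan_alloc - try to allocate channels that satisfy a client
 * @client: &dma_client with a capability mask and event callback
 *
 * Offers every compatible channel to the client until the client NAKs an
 * available resource.  Called with dma_list_mutex held.
 */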
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(
					chan, client);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK) {
					dma_chan_get(chan);
					chan->client_count++;
				} else if (ack == DMA_NAK)
					return;
			}
		}
	}
}

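/**
 * dma_sync_wait - spin wait for dma_async_is_tx_complete
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */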
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

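/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */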
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;
	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	/* Switch the channel to slow-ref mode: bias the kref so it cannot
	 * hit zero, then fold the per-cpu reference counts into it (and
	 * remove the bias) from an RCU callback once fast-path users are
	 * guaranteed to see slow_ref.
	 */
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

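/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */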
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

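/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */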
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* client was holding resources for this channel so
		 * free it
		 */
		if (ack == DMA_ACK) {
			dma_chan_put(chan);
			chan->client_count--;
		}
	}

	mutex_unlock(&dma_list_mutex);
}

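/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */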
void dma_async_client_register(struct dma_client *client)
{
	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
		!client->slave);

	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

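/**
 * dma_async_client_unregister - unregister a client
 * @client: &dma_client to remove
 *
 * Drops the client's references on any channels it was using and removes
 * it from the client list.
 */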
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK) {
				dma_chan_put(chan);
				chan->client_count--;
			}
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

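/**
 * dma_async_client_chan_request - send all available channels to the
 *   client that satisfy the capability mask
 * @client: requester
 */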
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

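/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */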
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan* chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One reference for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

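/**
 * dma_async_device_cleanup - signal that all references to a device are gone
 * @kref: kernel reference structure embedded in the &dma_device
 *
 * Completes &dma_device->done so dma_async_device_unregister() can return.
 */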
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

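/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */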
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

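/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */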
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

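/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */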
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

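/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both pages must stay memory resident (kernel memory or locked user space
 * pages).
 */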
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);