User:                    Jiri Slaby
Error type:              Invalid Pointer Dereference
Error type description:  A pointer which is invalid is being dereferenced
File location:           drivers/media/video/videobuf-dma-sg.c
Line in file:            421
Project:                 Linux Kernel
Project version:         2.6.28
Tools:                   Stanse (1.2), Smatch (1.59)
Entered:                 2011-11-07 22:22:22 UTC
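For orientation, the statements the report points at live in __videobuf_alloc() (lines 414-431 of the listing below). kzalloc() on line 419 may return NULL, and line 421 dereferences the returned pointer without any check. The excerpt below reproduces those lines; the trailing comments are editorial markers, not part of the source:

        static void *__videobuf_alloc(size_t size)
        {
                struct videobuf_dma_sg_memory *mem;
                struct videobuf_buffer *vb;

                vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);  /* line 419: may return NULL */

                mem = vb->priv = ((char *)vb)+size;          /* line 421: vb dereferenced unchecked */
                mem->magic=MAGIC_SG_MEM;
                /* ... */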
  1 /*
  2  * helper functions for SG DMA video4linux capture buffers
  3  *
  4  * The functions expect the hardware being able to scatter gather
  5  * (i.e. the buffers are not linear in physical memory, but fragmented
  6  * into PAGE_SIZE chunks). They also assume the driver does not need
  7  * to touch the video data.
  8  *
  9  * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 10  *
 11  * Highly based on video-buf written originally by:
 12  * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 13  * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 14  * (c) 2006 Ted Walther and John Sokol
 15  *
 16  * This program is free software; you can redistribute it and/or modify
 17  * it under the terms of the GNU General Public License as published by
 18  * the Free Software Foundation; either version 2
 19  */
 20
 21 #include <linux/init.h>
 22 #include <linux/module.h>
 23 #include <linux/moduleparam.h>
 24 #include <linux/slab.h>
 25 #include <linux/interrupt.h>
 26
 27 #include <linux/dma-mapping.h>
 28 #include <linux/vmalloc.h>
 29 #include <linux/pagemap.h>
 30 #include <linux/scatterlist.h>
 31 #include <asm/page.h>
 32 #include <asm/pgtable.h>
 33
 34 #include <media/videobuf-dma-sg.h>
 35
 36 #define MAGIC_DMABUF 0x19721112
 37 #define MAGIC_SG_MEM 0x17890714
 38
 39 #define MAGIC_CHECK(is,should) if (unlikely((is) != (should))) \
 40         { printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }
 41
 42 static int debug;
 43 module_param(debug, int, 0644);
 44
 45 MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
 46 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
 47 MODULE_LICENSE("GPL");
 48
 49 #define dprintk(level, fmt, arg...) if (debug >= level) \
 50         printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
 51
 52 /* --------------------------------------------------------------------- */
 53
 54 struct scatterlist*
 55 videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
 56 {
 57         struct scatterlist *sglist;
 58         struct page *pg;
 59         int i;
 60
 61         sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
 62         if (NULL == sglist)
 63                 return NULL;
 64         sg_init_table(sglist, nr_pages);
 65         for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
 66                 pg = vmalloc_to_page(virt);
 67                 if (NULL == pg)
 68                         goto err;
 69                 BUG_ON(PageHighMem(pg));
 70                 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
 71         }
 72         return sglist;
 73
 74  err:
 75         kfree(sglist);
 76         return NULL;
 77 }
 78
 79 struct scatterlist*
 80 videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
 81 {
 82         struct scatterlist *sglist;
 83         int i;
 84
 85         if (NULL == pages[0])
 86                 return NULL;
 87         sglist = kmalloc(nr_pages * sizeof(*sglist), GFP_KERNEL);
 88         if (NULL == sglist)
 89                 return NULL;
 90         sg_init_table(sglist, nr_pages);
 91
 92         if (PageHighMem(pages[0]))
 93                 /* DMA to highmem pages might not work */
 94                 goto highmem;
 95         sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
 96         for (i = 1; i < nr_pages; i++) {
 97                 if (NULL == pages[i])
 98                         goto nopage;
 99                 if (PageHighMem(pages[i]))
100                         goto highmem;
101                 sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
102         }
103         return sglist;
104
105  nopage:
106         dprintk(2,"sgl: oops - no page\n");
107         kfree(sglist);
108         return NULL;
109
110  highmem:
111         dprintk(2,"sgl: oops - highmem page\n");
112         kfree(sglist);
113         return NULL;
114 }
115
116 /* --------------------------------------------------------------------- */
117
118 struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
119 {
120         struct videobuf_dma_sg_memory *mem = buf->priv;
121         BUG_ON(!mem);
122
123         MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
124
125         return &mem->dma;
126 }
127
128 void videobuf_dma_init(struct videobuf_dmabuf *dma)
129 {
130         memset(dma,0,sizeof(*dma));
131         dma->magic = MAGIC_DMABUF;
132 }
133
134 static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
135                         int direction, unsigned long data, unsigned long size)
136 {
137         unsigned long first,last;
138         int err, rw = 0;
139
140         dma->direction = direction;
141         switch (dma->direction) {
142         case DMA_FROM_DEVICE:
143                 rw = READ;
144                 break;
145         case DMA_TO_DEVICE:
146                 rw = WRITE;
147                 break;
148         default:
149                 BUG();
150         }
151
152         first = (data & PAGE_MASK) >> PAGE_SHIFT;
153         last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
154         dma->offset = data & ~PAGE_MASK;
155         dma->nr_pages = last-first+1;
156         dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
157                              GFP_KERNEL);
158         if (NULL == dma->pages)
159                 return -ENOMEM;
160         dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
161                 data,size,dma->nr_pages);
162
163         err = get_user_pages(current,current->mm,
164                              data & PAGE_MASK, dma->nr_pages,
165                              rw == READ, 1, /* force */
166                              dma->pages, NULL);
167
168         if (err != dma->nr_pages) {
169                 dma->nr_pages = (err >= 0) ? err : 0;
170                 dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
171                 return err < 0 ? err : -EINVAL;
172         }
173         return 0;
174 }
175
176 int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
177                            unsigned long data, unsigned long size)
178 {
179         int ret;
180         down_read(&current->mm->mmap_sem);
181         ret = videobuf_dma_init_user_locked(dma, direction, data, size);
182         up_read(&current->mm->mmap_sem);
183
184         return ret;
185 }
186
187 int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
188                              int nr_pages)
189 {
190         dprintk(1,"init kernel [%d pages]\n",nr_pages);
191         dma->direction = direction;
192         dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
193         if (NULL == dma->vmalloc) {
194                 dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
195                 return -ENOMEM;
196         }
197         dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
198                 (unsigned long)dma->vmalloc,
199                 nr_pages << PAGE_SHIFT);
200         memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
201         dma->nr_pages = nr_pages;
202         return 0;
203 }
204
205 int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
206                               dma_addr_t addr, int nr_pages)
207 {
208         dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
209                 nr_pages,(unsigned long)addr);
210         dma->direction = direction;
211         if (0 == addr)
212                 return -EINVAL;
213
214         dma->bus_addr = addr;
215         dma->nr_pages = nr_pages;
216         return 0;
217 }
218
219 int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
220 {
221         MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
222         BUG_ON(0 == dma->nr_pages);
223
224         if (dma->pages) {
225                 dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
226                                                    dma->offset);
227         }
228         if (dma->vmalloc) {
229                 dma->sglist = videobuf_vmalloc_to_sg
230                                                 (dma->vmalloc,dma->nr_pages);
231         }
232         if (dma->bus_addr) {
233                 dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
234                 if (NULL != dma->sglist) {
235                         dma->sglen = 1;
236                         sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
237                         dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
238                         sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
239                 }
240         }
241         if (NULL == dma->sglist) {
242                 dprintk(1,"scatterlist is NULL\n");
243                 return -ENOMEM;
244         }
245         if (!dma->bus_addr) {
246                 dma->sglen = dma_map_sg(q->dev, dma->sglist,
247                                         dma->nr_pages, dma->direction);
248                 if (0 == dma->sglen) {
249                         printk(KERN_WARNING
250                                "%s: videobuf_map_sg failed\n",__func__);
251                         kfree(dma->sglist);
252                         dma->sglist = NULL;
253                         dma->sglen = 0;
254                         return -EIO;
255                 }
256         }
257         return 0;
258 }
259
260 int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
261 {
262         MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
263         BUG_ON(!dma->sglen);
264
265         dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
266         return 0;
267 }
268
269 int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
270 {
271         MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
272         if (!dma->sglen)
273                 return 0;
274
275         dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
276
277         kfree(dma->sglist);
278         dma->sglist = NULL;
279         dma->sglen = 0;
280         return 0;
281 }
282
283 int videobuf_dma_free(struct videobuf_dmabuf *dma)
284 {
285         MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
286         BUG_ON(dma->sglen);
287
288         if (dma->pages) {
289                 int i;
290                 for (i=0; i < dma->nr_pages; i++)
291                         page_cache_release(dma->pages[i]);
292                 kfree(dma->pages);
293                 dma->pages = NULL;
294         }
295
296         vfree(dma->vmalloc);
297         dma->vmalloc = NULL;
298
299         if (dma->bus_addr) {
300                 dma->bus_addr = 0;
301         }
302         dma->direction = DMA_NONE;
303         return 0;
304 }
305
306 /* --------------------------------------------------------------------- */
307
308 int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
309 {
310         struct videobuf_queue q;
311
312         q.dev = dev;
313
314         return videobuf_dma_map(&q, dma);
315 }
316
317 int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
318 {
319         struct videobuf_queue q;
320
321         q.dev = dev;
322
323         return videobuf_dma_unmap(&q, dma);
324 }
325
326 /* --------------------------------------------------------------------- */
327
328 static void
329 videobuf_vm_open(struct vm_area_struct *vma)
330 {
331         struct videobuf_mapping *map = vma->vm_private_data;
332
333         dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
334                 map->count,vma->vm_start,vma->vm_end);
335         map->count++;
336 }
337
338 static void
339 videobuf_vm_close(struct vm_area_struct *vma)
340 {
341         struct videobuf_mapping *map = vma->vm_private_data;
342         struct videobuf_queue *q = map->q;
343         struct videobuf_dma_sg_memory *mem;
344         int i;
345
346         dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
347                 map->count,vma->vm_start,vma->vm_end);
348
349         map->count--;
350         if (0 == map->count) {
351                 dprintk(1,"munmap %p q=%p\n",map,q);
352                 mutex_lock(&q->vb_lock);
353                 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
354                         if (NULL == q->bufs[i])
355                                 continue;
356                         mem=q->bufs[i]->priv;
357
358                         if (!mem)
359                                 continue;
360
361                         MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
362
363                         if (q->bufs[i]->map != map)
364                                 continue;
365                         q->bufs[i]->map = NULL;
366                         q->bufs[i]->baddr = 0;
367                         q->ops->buf_release(q,q->bufs[i]);
368                 }
369                 mutex_unlock(&q->vb_lock);
370                 kfree(map);
371         }
372         return;
373 }
374
375 /*
376  * Get a anonymous page for the mapping. Make sure we can DMA to that
377  * memory location with 32bit PCI devices (i.e. don't use highmem for
378  * now ...). Bounce buffers don't work very well for the data rates
379  * video capture has.
380  */
381 static int
382 videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
383 {
384         struct page *page;
385
386         dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
387                 (unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
388         page = alloc_page(GFP_USER | __GFP_DMA32);
389         if (!page)
390                 return VM_FAULT_OOM;
391         clear_user_page(page_address(page), (unsigned long)vmf->virtual_address,
392                         page);
393         vmf->page = page;
394         return 0;
395 }
396
397 static struct vm_operations_struct videobuf_vm_ops =
398 {
399         .open = videobuf_vm_open,
400         .close = videobuf_vm_close,
401         .fault = videobuf_vm_fault,
402 };
403
404 /* ---------------------------------------------------------------------
405  * SG handlers for the generic methods
406  */
407
408 /* Allocated area consists on 3 parts:
409         struct video_buffer
410         struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
411         struct videobuf_dma_sg_memory
412  */
413
414 static void *__videobuf_alloc(size_t size)
415 {
416         struct videobuf_dma_sg_memory *mem;
417         struct videobuf_buffer *vb;
418
419         vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
420
421         mem = vb->priv = ((char *)vb)+size;
422         mem->magic=MAGIC_SG_MEM;
423
424         videobuf_dma_init(&mem->dma);
425
426         dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
427                 __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
428                 mem,(long)sizeof(*mem));
429
430         return vb;
431 }
432
433 static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
434 {
435         struct videobuf_dma_sg_memory *mem = buf->priv;
436         BUG_ON(!mem);
437
438         MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
439
440         return mem->dma.vmalloc;
441 }
442
443 static int __videobuf_iolock (struct videobuf_queue* q,
444                               struct videobuf_buffer *vb,
445                               struct v4l2_framebuffer *fbuf)
446 {
447         int err,pages;
448         dma_addr_t bus;
449         struct videobuf_dma_sg_memory *mem = vb->priv;
450         BUG_ON(!mem);
451
452         MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
453
454         switch (vb->memory) {
455         case V4L2_MEMORY_MMAP:
456         case V4L2_MEMORY_USERPTR:
457                 if (0 == vb->baddr) {
458                         /* no userspace addr -- kernel bounce buffer */
459                         pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
460                         err = videobuf_dma_init_kernel( &mem->dma,
461                                                         DMA_FROM_DEVICE,
462                                                         pages );
463                         if (0 != err)
464                                 return err;
465                 } else if (vb->memory == V4L2_MEMORY_USERPTR) {
466                         /* dma directly to userspace */
467                         err = videobuf_dma_init_user( &mem->dma,
468                                                       DMA_FROM_DEVICE,
469                                                       vb->baddr,vb->bsize );
470                         if (0 != err)
471                                 return err;
472                 } else {
473                         /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
474                         buffers can only be called from videobuf_qbuf
475                         we take current->mm->mmap_sem there, to prevent
476                         locking inversion, so don't take it here */
477
478                         err = videobuf_dma_init_user_locked(&mem->dma,
479                                                       DMA_FROM_DEVICE,
480                                                       vb->baddr, vb->bsize);
481                         if (0 != err)
482                                 return err;
483                 }
484                 break;
485         case V4L2_MEMORY_OVERLAY:
486                 if (NULL == fbuf)
487                         return -EINVAL;
488                 /* FIXME: need sanity checks for vb->boff */
489                 /*
490                  * Using a double cast to avoid compiler warnings when
491                  * building for PAE. Compiler doesn't like direct casting
492                  * of a 32 bit ptr to 64 bit integer.
493                  */
494                 bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
495                 pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
496                 err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
497                                                 bus, pages);
498                 if (0 != err)
499                         return err;
500                 break;
501         default:
502                 BUG();
503         }
504         err = videobuf_dma_map(q, &mem->dma);
505         if (0 != err)
506                 return err;
507
508         return 0;
509 }
510
511 static int __videobuf_sync(struct videobuf_queue *q,
512                            struct videobuf_buffer *buf)
513 {
514         struct videobuf_dma_sg_memory *mem = buf->priv;
515         BUG_ON(!mem);
516         MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
517
518         return videobuf_dma_sync(q,&mem->dma);
519 }
520
521 static int __videobuf_mmap_free(struct videobuf_queue *q)
522 {
523         int i;
524
525         for (i = 0; i < VIDEO_MAX_FRAME; i++) {
526                 if (q->bufs[i]) {
527                         if (q->bufs[i]->map)
528                                 return -EBUSY;
529                 }
530         }
531
532         return 0;
533 }
534
535 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
536                                   struct vm_area_struct *vma)
537 {
538         struct videobuf_dma_sg_memory *mem;
539         struct videobuf_mapping *map;
540         unsigned int first,last,size,i;
541         int retval;
542
543         retval = -EINVAL;
544         if (!(vma->vm_flags & VM_WRITE)) {
545                 dprintk(1,"mmap app bug: PROT_WRITE please\n");
546                 goto done;
547         }
548         if (!(vma->vm_flags & VM_SHARED)) {
549                 dprintk(1,"mmap app bug: MAP_SHARED please\n");
550                 goto done;
551         }
552
553         /* This function maintains backwards compatibility with V4L1 and will
554          * map more than one buffer if the vma length is equal to the combined
555          * size of multiple buffers than it will map them together. See
556          * VIDIOCGMBUF in the v4l spec
557          *
558          * TODO: Allow drivers to specify if they support this mode
559          */
560
561         /* look for first buffer to map */
562         for (first = 0; first < VIDEO_MAX_FRAME; first++) {
563                 if (NULL == q->bufs[first])
564                         continue;
565                 mem=q->bufs[first]->priv;
566                 BUG_ON(!mem);
567                 MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
568
569                 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
570                         continue;
571                 if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
572                         break;
573         }
574         if (VIDEO_MAX_FRAME == first) {
575                 dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
576                         (vma->vm_pgoff << PAGE_SHIFT));
577                 goto done;
578         }
579
580         /* look for last buffer to map */
581         for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
582                 if (NULL == q->bufs[last])
583                         continue;
584                 if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
585                         continue;
586                 if (q->bufs[last]->map) {
587                         retval = -EBUSY;
588                         goto done;
589                 }
590                 size += q->bufs[last]->bsize;
591                 if (size == (vma->vm_end - vma->vm_start))
592                         break;
593         }
594         if (VIDEO_MAX_FRAME == last) {
595                 dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
596                         (vma->vm_end - vma->vm_start));
597                 goto done;
598         }
599
600         /* create mapping + update buffer list */
601         retval = -ENOMEM;
602         map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
603         if (NULL == map)
604                 goto done;
605
606         size = 0;
607         for (i = first; i <= last; i++) {
608                 if (NULL == q->bufs[i])
609                         continue;
610                 q->bufs[i]->map = map;
611                 q->bufs[i]->baddr = vma->vm_start + size;
612                 size += q->bufs[i]->bsize;
613         }
614
615         map->count = 1;
616         map->start = vma->vm_start;
617         map->end = vma->vm_end;
618         map->q = q;
619         vma->vm_ops = &videobuf_vm_ops;
620         vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
621         vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
622         vma->vm_private_data = map;
623         dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
624                 map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
625         retval = 0;
626
627  done:
628         return retval;
629 }
630
631 static int __videobuf_copy_to_user ( struct videobuf_queue *q,
632                                 char __user *data, size_t count,
633                                 int nonblocking )
634 {
635         struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
636         BUG_ON(!mem);
637         MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
638
639         /* copy to userspace */
640         if (count > q->read_buf->size - q->read_off)
641                 count = q->read_buf->size - q->read_off;
642
643         if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
644                 return -EFAULT;
645
646         return count;
647 }
648
649 static int __videobuf_copy_stream ( struct videobuf_queue *q,
650                                 char __user *data, size_t count, size_t pos,
651                                 int vbihack, int nonblocking )
652 {
653         unsigned int *fc;
654         struct videobuf_dma_sg_memory *mem = q->read_buf->priv;
655         BUG_ON(!mem);
656         MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
657
658         if (vbihack) {
659                 /* dirty, undocumented hack -- pass the frame counter
660                  * within the last four bytes of each vbi data block.
661                  * We need that one to maintain backward compatibility
662                  * to all vbi decoding software out there ... */
663                 fc = (unsigned int*)mem->dma.vmalloc;
664                 fc += (q->read_buf->size>>2) -1;
665                 *fc = q->read_buf->field_count >> 1;
666                 dprintk(1,"vbihack: %d\n",*fc);
667         }
668
669         /* copy stuff using the common method */
670         count = __videobuf_copy_to_user (q,data,count,nonblocking);
671
672         if ( (count==-EFAULT) && (0 == pos) )
673                 return -EFAULT;
674
675         return count;
676 }
677
678 static struct videobuf_qtype_ops sg_ops = {
679         .magic = MAGIC_QTYPE_OPS,
680
681         .alloc = __videobuf_alloc,
682         .iolock = __videobuf_iolock,
683         .sync = __videobuf_sync,
684         .mmap_free = __videobuf_mmap_free,
685         .mmap_mapper = __videobuf_mmap_mapper,
686         .video_copy_to_user = __videobuf_copy_to_user,
687         .copy_stream = __videobuf_copy_stream,
688         .vmalloc = __videobuf_to_vmalloc,
689 };
690
691 void *videobuf_sg_alloc(size_t size)
692 {
693         struct videobuf_queue q;
694
695         /* Required to make generic handler to call __videobuf_alloc */
696         q.int_ops = &sg_ops;
697
698         q.msize = size;
699
700         return videobuf_alloc(&q);
701 }
702
703 void videobuf_queue_sg_init(struct videobuf_queue* q,
704                          struct videobuf_queue_ops *ops,
705                          struct device *dev,
706                          spinlock_t *irqlock,
707                          enum v4l2_buf_type type,
708                          enum v4l2_field field,
709                          unsigned int msize,
710                          void *priv)
711 {
712         videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
713                                  priv, &sg_ops);
714 }
715
716 /* --------------------------------------------------------------------- */
717
718 EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);
719
720 EXPORT_SYMBOL_GPL(videobuf_to_dma);
721 EXPORT_SYMBOL_GPL(videobuf_dma_init);
722 EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
723 EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
724 EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
725 EXPORT_SYMBOL_GPL(videobuf_dma_map);
726 EXPORT_SYMBOL_GPL(videobuf_dma_sync);
727 EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
728 EXPORT_SYMBOL_GPL(videobuf_dma_free);
729
730 EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
731 EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
732 EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
733
734 EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
735
736 /*
737  * Local variables:
738  * c-basic-offset: 8
739  * End:
740  */
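A minimal sketch of one way to address the report: bail out of __videobuf_alloc() when kzalloc() fails, before vb->priv is touched. This assumes the desired failure behaviour is to hand a NULL pointer back to the generic videobuf_alloc() caller; it is an editorial illustration against the 2.6.28 code above, not necessarily the change that was merged upstream.

        static void *__videobuf_alloc(size_t size)
        {
                struct videobuf_dma_sg_memory *mem;
                struct videobuf_buffer *vb;

                vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
                if (NULL == vb)                 /* allocation failed: nothing to initialize */
                        return NULL;

                mem = vb->priv = ((char *)vb)+size;
                mem->magic=MAGIC_SG_MEM;

                videobuf_dma_init(&mem->dma);

                dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
                        __func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
                        mem,(long)sizeof(*mem));

                return vb;
        }

With the early return in place, the dereference at the reported line can no longer see a NULL vb; callers of videobuf_sg_alloc() then observe a NULL result on allocation failure instead of a crash.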