Showing error 766

User: Jiri Slaby
Error type: Invalid Pointer Dereference
Error type description: An invalid pointer is dereferenced
File location: drivers/infiniband/hw/ipath/ipath_mr.c
Line in file: 332
Project: Linux Kernel
Project version: 2.6.28
Tool: Stanse (1.2)
Entered: 2011-11-07 22:22:22 UTC
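
Note: the flagged statement is "kfree(fmr->mr.map[--i]);" in the bail path of
ipath_alloc_fmr(), at file line 332. Stanse reasons that fmr may be NULL there,
because the kmalloc() of fmr at line 294 can fail and jump straight to bail.
On that path, however, i is still 0, so the cleanup loop never executes and
fmr is never dereferenced. This looks like a false positive that the checker
cannot prove away; a user-space sketch of the pattern follows the source
listing.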


Source:

  1/*
  2 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
  3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  4 *
  5 * This software is available to you under a choice of one of two
  6 * licenses.  You may choose to be licensed under the terms of the GNU
  7 * General Public License (GPL) Version 2, available from the file
  8 * COPYING in the main directory of this source tree, or the
  9 * OpenIB.org BSD license below:
 10 *
 11 *     Redistribution and use in source and binary forms, with or
 12 *     without modification, are permitted provided that the following
 13 *     conditions are met:
 14 *
 15 *      - Redistributions of source code must retain the above
 16 *        copyright notice, this list of conditions and the following
 17 *        disclaimer.
 18 *
 19 *      - Redistributions in binary form must reproduce the above
 20 *        copyright notice, this list of conditions and the following
 21 *        disclaimer in the documentation and/or other materials
 22 *        provided with the distribution.
 23 *
 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 * SOFTWARE.
 32 */
 33
 34#include <rdma/ib_umem.h>
 35#include <rdma/ib_pack.h>
 36#include <rdma/ib_smi.h>
 37
 38#include "ipath_verbs.h"
 39
 40/* Fast memory region */
 41struct ipath_fmr {
 42        struct ib_fmr ibfmr;
 43        u8 page_shift;
 44        struct ipath_mregion mr;        /* must be last */
 45};
 46
 47static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
 48{
 49        return container_of(ibfmr, struct ipath_fmr, ibfmr);
 50}
 51
 52/**
 53 * ipath_get_dma_mr - get a DMA memory region
 54 * @pd: protection domain for this memory region
 55 * @acc: access flags
 56 *
 57 * Returns the memory region on success, otherwise returns an errno.
 58 * Note that all DMA addresses should be created via the
 59 * struct ib_dma_mapping_ops functions (see ipath_dma.c).
 60 */
 61struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
 62{
 63        struct ipath_mr *mr;
 64        struct ib_mr *ret;
 65
 66        mr = kzalloc(sizeof *mr, GFP_KERNEL);
 67        if (!mr) {
 68                ret = ERR_PTR(-ENOMEM);
 69                goto bail;
 70        }
 71
 72        mr->mr.access_flags = acc;
 73        ret = &mr->ibmr;
 74
 75bail:
 76        return ret;
 77}
 78
 79static struct ipath_mr *alloc_mr(int count,
 80                                 struct ipath_lkey_table *lk_table)
 81{
 82        struct ipath_mr *mr;
 83        int m, i = 0;
 84
 85        /* Allocate struct plus pointers to first level page tables. */
 86        m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
 87        mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
 88        if (!mr)
 89                goto done;
 90
 91        /* Allocate first level page tables. */
 92        for (; i < m; i++) {
 93                mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
 94                if (!mr->mr.map[i])
 95                        goto bail;
 96        }
 97        mr->mr.mapsz = m;
 98
 99        /*
100         * ib_reg_phys_mr() will initialize mr->ibmr except for
101         * lkey and rkey.
102         */
103        if (!ipath_alloc_lkey(lk_table, &mr->mr))
104                goto bail;
105        mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
106
107        goto done;
108
109bail:
110        while (i) {
111                i--;
112                kfree(mr->mr.map[i]);
113        }
114        kfree(mr);
115        mr = NULL;
116
117done:
118        return mr;
119}
120
121/**
122 * ipath_reg_phys_mr - register a physical memory region
123 * @pd: protection domain for this memory region
124 * @buffer_list: pointer to the list of physical buffers to register
125 * @num_phys_buf: the number of physical buffers to register
126 * @iova_start: the starting address passed over IB which maps to this MR
127 *
128 * Returns the memory region on success, otherwise returns an errno.
129 */
130struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
131                                struct ib_phys_buf *buffer_list,
132                                int num_phys_buf, int acc, u64 *iova_start)
133{
134        struct ipath_mr *mr;
135        int n, m, i;
136        struct ib_mr *ret;
137
138        mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
139        if (mr == NULL) {
140                ret = ERR_PTR(-ENOMEM);
141                goto bail;
142        }
143
144        mr->mr.pd = pd;
145        mr->mr.user_base = *iova_start;
146        mr->mr.iova = *iova_start;
147        mr->mr.length = 0;
148        mr->mr.offset = 0;
149        mr->mr.access_flags = acc;
150        mr->mr.max_segs = num_phys_buf;
151        mr->umem = NULL;
152
153        m = 0;
154        n = 0;
155        for (i = 0; i < num_phys_buf; i++) {
156                mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
157                mr->mr.map[m]->segs[n].length = buffer_list[i].size;
158                mr->mr.length += buffer_list[i].size;
159                n++;
160                if (n == IPATH_SEGSZ) {
161                        m++;
162                        n = 0;
163                }
164        }
165
166        ret = &mr->ibmr;
167
168bail:
169        return ret;
170}
171
172/**
173 * ipath_reg_user_mr - register a userspace memory region
174 * @pd: protection domain for this memory region
175 * @start: starting userspace address
176 * @length: length of region to register
177 * @virt_addr: virtual address to use (from HCA's point of view)
178 * @mr_access_flags: access flags for this memory region
179 * @udata: unused by the InfiniPath driver
180 *
181 * Returns the memory region on success, otherwise returns an errno.
182 */
183struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
184                                u64 virt_addr, int mr_access_flags,
185                                struct ib_udata *udata)
186{
187        struct ipath_mr *mr;
188        struct ib_umem *umem;
189        struct ib_umem_chunk *chunk;
190        int n, m, i;
191        struct ib_mr *ret;
192
193        if (length == 0) {
194                ret = ERR_PTR(-EINVAL);
195                goto bail;
196        }
197
198        umem = ib_umem_get(pd->uobject->context, start, length,
199                           mr_access_flags, 0);
200        if (IS_ERR(umem))
201                return (void *) umem;
202
203        n = 0;
204        list_for_each_entry(chunk, &umem->chunk_list, list)
205                n += chunk->nents;
206
207        mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
208        if (!mr) {
209                ret = ERR_PTR(-ENOMEM);
210                ib_umem_release(umem);
211                goto bail;
212        }
213
214        mr->mr.pd = pd;
215        mr->mr.user_base = start;
216        mr->mr.iova = virt_addr;
217        mr->mr.length = length;
218        mr->mr.offset = umem->offset;
219        mr->mr.access_flags = mr_access_flags;
220        mr->mr.max_segs = n;
221        mr->umem = umem;
222
223        m = 0;
224        n = 0;
225        list_for_each_entry(chunk, &umem->chunk_list, list) {
226                for (i = 0; i < chunk->nents; i++) {
227                        void *vaddr;
228
229                        vaddr = page_address(sg_page(&chunk->page_list[i]));
230                        if (!vaddr) {
231                                ret = ERR_PTR(-EINVAL);
232                                goto bail;
233                        }
234                        mr->mr.map[m]->segs[n].vaddr = vaddr;
235                        mr->mr.map[m]->segs[n].length = umem->page_size;
236                        n++;
237                        if (n == IPATH_SEGSZ) {
238                                m++;
239                                n = 0;
240                        }
241                }
242        }
243        ret = &mr->ibmr;
244
245bail:
246        return ret;
247}
248
249/**
250 * ipath_dereg_mr - unregister and free a memory region
251 * @ibmr: the memory region to free
252 *
253 * Returns 0 on success.
254 *
255 * Note that this is called to free MRs created by ipath_get_dma_mr()
256 * or ipath_reg_user_mr().
257 */
258int ipath_dereg_mr(struct ib_mr *ibmr)
259{
260        struct ipath_mr *mr = to_imr(ibmr);
261        int i;
262
263        ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
264        i = mr->mr.mapsz;
265        while (i) {
266                i--;
267                kfree(mr->mr.map[i]);
268        }
269
270        if (mr->umem)
271                ib_umem_release(mr->umem);
272
273        kfree(mr);
274        return 0;
275}
276
277/**
278 * ipath_alloc_fmr - allocate a fast memory region
279 * @pd: the protection domain for this memory region
280 * @mr_access_flags: access flags for this memory region
281 * @fmr_attr: fast memory region attributes
282 *
283 * Returns the memory region on success, otherwise returns an errno.
284 */
285struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
286                               struct ib_fmr_attr *fmr_attr)
287{
288        struct ipath_fmr *fmr;
289        int m, i = 0;
290        struct ib_fmr *ret;
291
292        /* Allocate struct plus pointers to first level page tables. */
293        m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
294        fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
295        if (!fmr)
296                goto bail;
297
298        /* Allocate first level page tables. */
299        for (; i < m; i++) {
300                fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
301                                         GFP_KERNEL);
302                if (!fmr->mr.map[i])
303                        goto bail;
304        }
305        fmr->mr.mapsz = m;
306
307        /*
308         * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
309         * rkey.
310         */
311        if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
312                goto bail;
313        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
314        /*
315         * Resources are allocated but no valid mapping (RKEY can't be
316         * used).
317         */
318        fmr->mr.pd = pd;
319        fmr->mr.user_base = 0;
320        fmr->mr.iova = 0;
321        fmr->mr.length = 0;
322        fmr->mr.offset = 0;
323        fmr->mr.access_flags = mr_access_flags;
324        fmr->mr.max_segs = fmr_attr->max_pages;
325        fmr->page_shift = fmr_attr->page_shift;
326
327        ret = &fmr->ibfmr;
328        goto done;
329
330bail:
331        while (i)
332                kfree(fmr->mr.map[--i]);
333        kfree(fmr);
334        ret = ERR_PTR(-ENOMEM);
335
336done:
337        return ret;
338}
339
340/**
341 * ipath_map_phys_fmr - set up a fast memory region
 342 * @ibfmr: the fast memory region to set up
343 * @page_list: the list of pages to associate with the fast memory region
344 * @list_len: the number of pages to associate with the fast memory region
345 * @iova: the virtual address of the start of the fast memory region
346 *
347 * This may be called from interrupt context.
348 */
349
350int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
351                       int list_len, u64 iova)
352{
353        struct ipath_fmr *fmr = to_ifmr(ibfmr);
354        struct ipath_lkey_table *rkt;
355        unsigned long flags;
356        int m, n, i;
357        u32 ps;
358        int ret;
359
360        if (list_len > fmr->mr.max_segs) {
361                ret = -EINVAL;
362                goto bail;
363        }
364        rkt = &to_idev(ibfmr->device)->lk_table;
365        spin_lock_irqsave(&rkt->lock, flags);
366        fmr->mr.user_base = iova;
367        fmr->mr.iova = iova;
368        ps = 1 << fmr->page_shift;
369        fmr->mr.length = list_len * ps;
370        m = 0;
371        n = 0;
372        ps = 1 << fmr->page_shift;
373        for (i = 0; i < list_len; i++) {
374                fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
375                fmr->mr.map[m]->segs[n].length = ps;
376                if (++n == IPATH_SEGSZ) {
377                        m++;
378                        n = 0;
379                }
380        }
381        spin_unlock_irqrestore(&rkt->lock, flags);
382        ret = 0;
383
384bail:
385        return ret;
386}
387
388/**
389 * ipath_unmap_fmr - unmap fast memory regions
390 * @fmr_list: the list of fast memory regions to unmap
391 *
392 * Returns 0 on success.
393 */
394int ipath_unmap_fmr(struct list_head *fmr_list)
395{
396        struct ipath_fmr *fmr;
397        struct ipath_lkey_table *rkt;
398        unsigned long flags;
399
400        list_for_each_entry(fmr, fmr_list, ibfmr.list) {
401                rkt = &to_idev(fmr->ibfmr.device)->lk_table;
402                spin_lock_irqsave(&rkt->lock, flags);
403                fmr->mr.user_base = 0;
404                fmr->mr.iova = 0;
405                fmr->mr.length = 0;
406                spin_unlock_irqrestore(&rkt->lock, flags);
407        }
408        return 0;
409}
410
411/**
412 * ipath_dealloc_fmr - deallocate a fast memory region
413 * @ibfmr: the fast memory region to deallocate
414 *
415 * Returns 0 on success.
416 */
417int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
418{
419        struct ipath_fmr *fmr = to_ifmr(ibfmr);
420        int i;
421
422        ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
423        i = fmr->mr.mapsz;
424        while (i)
425                kfree(fmr->mr.map[--i]);
426        kfree(fmr);
427        return 0;
428}
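
Analysis:

The dereference flagged at line 332 sits in the cleanup loop shared by the
bail paths of alloc_mr() and ipath_alloc_fmr(). Below is a minimal user-space
sketch of the same allocate-then-unwind pattern; the names (struct table,
alloc_table) are hypothetical stand-ins for struct ipath_fmr and its map[]
tables, not kernel code. It shows why the loop body is only reachable with a
valid pointer:

#include <stdlib.h>

/* Hypothetical stand-in for struct ipath_fmr and its first-level tables. */
struct table {
        int nmaps;
        int *map[8];
};

static struct table *alloc_table(int m)
{
        struct table *t;
        int i = 0;

        if (m < 0 || m > 8)
                return NULL;            /* sketch only: fixed-size map[] */

        t = malloc(sizeof *t);
        if (!t)
                goto bail;              /* t == NULL, but i is still 0 */

        /* Allocate first-level tables, as ipath_alloc_fmr() does. */
        for (; i < m; i++) {
                t->map[i] = malloc(sizeof *t->map[i]);
                if (!t->map[i])
                        goto bail;      /* t != NULL whenever i > 0 */
        }
        t->nmaps = m;
        return t;

bail:
        /*
         * Shape of the statement flagged at line 332: t is dereferenced
         * here, but the loop body runs only when i > 0, and every path
         * that reaches bail with i > 0 has a non-NULL t. free(NULL),
         * like kfree(NULL), is a no-op.
         */
        while (i)
                free(t->map[--i]);
        free(t);
        return NULL;
}

int main(void)
{
        struct table *t = alloc_table(4);

        if (t) {
                while (t->nmaps)
                        free(t->map[--t->nmaps]);
                free(t);
        }
        return 0;
}

One way to silence the warning without changing behavior would be to jump to
a separate label (say, bail_nomem) when the initial kmalloc() fails, so the
map[] cleanup loop becomes statically unreachable with a NULL fmr. That label
name is hypothetical, offered as a possible restructuring rather than a
change made upstream.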