Showing error 1672

User: Jiri Slaby
Error type: Invalid Pointer Dereference
Error type description: An invalid pointer is being dereferenced
File location: fs/xfs/xfs_itable.c
Line in file: 874
Project: Linux Kernel
Project version: 2.6.28
Tools: Smatch (1.59)
Entered: 2013-09-10 07:54:05 UTC
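
Note: In the listing below, line 874 is the store
"buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);" in
xfs_inumbers(). The buffer written through there comes from the
kmem_alloc() call at line 825, whose return value is never tested, so the
report most likely concerns a possible NULL dereference on the
allocation-failure path (KM_SLEEP allocations are normally not expected
to fail, so this may be a false positive). The user-space sketch below
only illustrates the reported pattern; the names xi_chunk and
fill_chunks are illustrative and do not appear in the kernel source.

Sketch (not part of the kernel source):

#include <stdlib.h>

struct xi_chunk {                       /* stands in for xfs_inogrp_t */
        unsigned long long xi_startino;
        int                xi_alloccount;
};

static int fill_chunks(int bcount)
{
        /* kmem_alloc(..., KM_SLEEP) is modelled here by plain malloc(). */
        struct xi_chunk *buffer = malloc(bcount * sizeof(*buffer));
        int bufidx = 0;

        /*
         * Without this test, the store below writes through an invalid
         * pointer whenever the allocation fails -- the shape of the
         * dereference reported at fs/xfs/xfs_itable.c:874.
         */
        if (buffer == NULL)
                return -1;

        buffer[bufidx].xi_startino = 0;         /* mirrors line 874 */
        buffer[bufidx].xi_alloccount = 0;
        free(buffer);
        return 0;
}

int main(void)
{
        return fill_chunks(16) ? EXIT_FAILURE : EXIT_SUCCESS;
}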


Source:

  1/*
  2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or
  6 * modify it under the terms of the GNU General Public License as
  7 * published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope that it would be useful,
 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 12 * GNU General Public License for more details.
 13 *
 14 * You should have received a copy of the GNU General Public License
 15 * along with this program; if not, write the Free Software Foundation,
 16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 17 */
 18#include "xfs.h"
 19#include "xfs_fs.h"
 20#include "xfs_types.h"
 21#include "xfs_bit.h"
 22#include "xfs_log.h"
 23#include "xfs_inum.h"
 24#include "xfs_trans.h"
 25#include "xfs_sb.h"
 26#include "xfs_ag.h"
 27#include "xfs_dir2.h"
 28#include "xfs_dmapi.h"
 29#include "xfs_mount.h"
 30#include "xfs_bmap_btree.h"
 31#include "xfs_alloc_btree.h"
 32#include "xfs_ialloc_btree.h"
 33#include "xfs_dir2_sf.h"
 34#include "xfs_attr_sf.h"
 35#include "xfs_dinode.h"
 36#include "xfs_inode.h"
 37#include "xfs_ialloc.h"
 38#include "xfs_itable.h"
 39#include "xfs_error.h"
 40#include "xfs_btree.h"
 41
 42int
 43xfs_internal_inum(
 44        xfs_mount_t        *mp,
 45        xfs_ino_t        ino)
 46{
 47        return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
 48                (xfs_sb_version_hasquota(&mp->m_sb) &&
 49                 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
 50}
 51
 52STATIC int
 53xfs_bulkstat_one_iget(
 54        xfs_mount_t        *mp,                /* mount point for filesystem */
 55        xfs_ino_t        ino,                /* inode number to get data for */
 56        xfs_daddr_t        bno,                /* starting bno of inode cluster */
 57        xfs_bstat_t        *buf,                /* return buffer */
 58        int                *stat)                /* BULKSTAT_RV_... */
 59{
 60        xfs_icdinode_t        *dic;        /* dinode core info pointer */
 61        xfs_inode_t        *ip;                /* incore inode pointer */
 62        int                error;
 63
 64        error = xfs_iget(mp, NULL, ino,
 65                         XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
 66        if (error) {
 67                *stat = BULKSTAT_RV_NOTHING;
 68                return error;
 69        }
 70
 71        ASSERT(ip != NULL);
 72        ASSERT(ip->i_blkno != (xfs_daddr_t)0);
 73
 74        dic = &ip->i_d;
 75
 76        /* xfs_iget returns the following without needing
 77         * further change.
 78         */
 79        buf->bs_nlink = dic->di_nlink;
 80        buf->bs_projid = dic->di_projid;
 81        buf->bs_ino = ino;
 82        buf->bs_mode = dic->di_mode;
 83        buf->bs_uid = dic->di_uid;
 84        buf->bs_gid = dic->di_gid;
 85        buf->bs_size = dic->di_size;
 86        vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
 87        buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
 88        buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
 89        buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
 90        buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
 91        buf->bs_xflags = xfs_ip2xflags(ip);
 92        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
 93        buf->bs_extents = dic->di_nextents;
 94        buf->bs_gen = dic->di_gen;
 95        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
 96        buf->bs_dmevmask = dic->di_dmevmask;
 97        buf->bs_dmstate = dic->di_dmstate;
 98        buf->bs_aextents = dic->di_anextents;
 99
100        switch (dic->di_format) {
101        case XFS_DINODE_FMT_DEV:
102                buf->bs_rdev = ip->i_df.if_u2.if_rdev;
103                buf->bs_blksize = BLKDEV_IOSIZE;
104                buf->bs_blocks = 0;
105                break;
106        case XFS_DINODE_FMT_LOCAL:
107        case XFS_DINODE_FMT_UUID:
108                buf->bs_rdev = 0;
109                buf->bs_blksize = mp->m_sb.sb_blocksize;
110                buf->bs_blocks = 0;
111                break;
112        case XFS_DINODE_FMT_EXTENTS:
113        case XFS_DINODE_FMT_BTREE:
114                buf->bs_rdev = 0;
115                buf->bs_blksize = mp->m_sb.sb_blocksize;
116                buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
117                break;
118        }
119
120        xfs_iput(ip, XFS_ILOCK_SHARED);
121        return error;
122}
123
124STATIC void
125xfs_bulkstat_one_dinode(
126        xfs_mount_t        *mp,                /* mount point for filesystem */
127        xfs_ino_t        ino,                /* inode number to get data for */
128        xfs_dinode_t        *dip,                /* dinode inode pointer */
129        xfs_bstat_t        *buf)                /* return buffer */
130{
131        xfs_dinode_core_t *dic;                /* dinode core info pointer */
132
133        dic = &dip->di_core;
134
135        /*
136         * The inode format changed when we moved the link count and
137         * made it 32 bits long.  If this is an old format inode,
138         * convert it in memory to look like a new one.  If it gets
139         * flushed to disk we will convert back before flushing or
140         * logging it.  We zero out the new projid field and the old link
141         * count field.  We'll handle clearing the pad field (the remains
142         * of the old uuid field) when we actually convert the inode to
143         * the new format. We don't change the version number so that we
144         * can distinguish this from a real new format inode.
145         */
146        if (dic->di_version == XFS_DINODE_VERSION_1) {
147                buf->bs_nlink = be16_to_cpu(dic->di_onlink);
148                buf->bs_projid = 0;
149        } else {
150                buf->bs_nlink = be32_to_cpu(dic->di_nlink);
151                buf->bs_projid = be16_to_cpu(dic->di_projid);
152        }
153
154        buf->bs_ino = ino;
155        buf->bs_mode = be16_to_cpu(dic->di_mode);
156        buf->bs_uid = be32_to_cpu(dic->di_uid);
157        buf->bs_gid = be32_to_cpu(dic->di_gid);
158        buf->bs_size = be64_to_cpu(dic->di_size);
159        buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
160        buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
161        buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
162        buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
163        buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
164        buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
165        buf->bs_xflags = xfs_dic2xflags(dip);
166        buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
167        buf->bs_extents = be32_to_cpu(dic->di_nextents);
168        buf->bs_gen = be32_to_cpu(dic->di_gen);
169        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
170        buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
171        buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
172        buf->bs_aextents = be16_to_cpu(dic->di_anextents);
173
174        switch (dic->di_format) {
175        case XFS_DINODE_FMT_DEV:
176                buf->bs_rdev = be32_to_cpu(dip->di_u.di_dev);
177                buf->bs_blksize = BLKDEV_IOSIZE;
178                buf->bs_blocks = 0;
179                break;
180        case XFS_DINODE_FMT_LOCAL:
181        case XFS_DINODE_FMT_UUID:
182                buf->bs_rdev = 0;
183                buf->bs_blksize = mp->m_sb.sb_blocksize;
184                buf->bs_blocks = 0;
185                break;
186        case XFS_DINODE_FMT_EXTENTS:
187        case XFS_DINODE_FMT_BTREE:
188                buf->bs_rdev = 0;
189                buf->bs_blksize = mp->m_sb.sb_blocksize;
190                buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
191                break;
192        }
193}
194
195STATIC int
196xfs_bulkstat_one_fmt(
197        void                        __user *ubuffer,
198        const xfs_bstat_t        *buffer)
199{
200        if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
201                return -EFAULT;
202        return sizeof(*buffer);
203}
204
205/*
206 * Return stat information for one inode.
207 * Return 0 if ok, else errno.
208 */
209int                                       /* error status */
210xfs_bulkstat_one(
211        xfs_mount_t        *mp,                /* mount point for filesystem */
212        xfs_ino_t        ino,                /* inode number to get data for */
213        void                __user *buffer,        /* buffer to place output in */
214        int                ubsize,                /* size of buffer */
215        void                *private_data,        /* my private data */
216        xfs_daddr_t        bno,                /* starting bno of inode cluster */
217        int                *ubused,        /* bytes used by me */
218        void                *dibuff,        /* on-disk inode buffer */
219        int                *stat)                /* BULKSTAT_RV_... */
220{
221        xfs_bstat_t        *buf;                /* return buffer */
222        int                error = 0;        /* error value */
223        xfs_dinode_t        *dip;                /* dinode inode pointer */
224        bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt;
225
226        dip = (xfs_dinode_t *)dibuff;
227        *stat = BULKSTAT_RV_NOTHING;
228
229        if (!buffer || xfs_internal_inum(mp, ino))
230                return XFS_ERROR(EINVAL);
231        if (ubsize < sizeof(*buf))
232                return XFS_ERROR(ENOMEM);
233
234        buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
235
236        if (dip == NULL) {
237                /* We're not being passed a pointer to a dinode.  This happens
238                 * if BULKSTAT_FG_IGET is selected.  Do the iget.
239                 */
240                error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
241                if (error)
242                        goto out_free;
243        } else {
244                xfs_bulkstat_one_dinode(mp, ino, dip, buf);
245        }
246
247        error = formatter(buffer, buf);
248        if (error < 0)  {
249                error = EFAULT;
250                goto out_free;
251        }
252
253        *stat = BULKSTAT_RV_DIDONE;
254        if (ubused)
255                *ubused = error;
256
257 out_free:
258        kmem_free(buf);
259        return error;
260}
261
262/*
263 * Test to see whether we can use the ondisk inode directly, based
264 * on the given bulkstat flags, filling in dipp accordingly.
265 * Returns zero if the inode is dodgey.
266 */
267STATIC int
268xfs_bulkstat_use_dinode(
269        xfs_mount_t        *mp,
270        int                flags,
271        xfs_buf_t        *bp,
272        int                clustidx,
273        xfs_dinode_t        **dipp)
274{
275        xfs_dinode_t        *dip;
276        unsigned int        aformat;
277
278        *dipp = NULL;
279        if (!bp || (flags & BULKSTAT_FG_IGET))
280                return 1;
281        dip = (xfs_dinode_t *)
282                        xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
283        /*
284         * Check the buffer containing the on-disk inode for di_mode == 0.
285         * This is to prevent xfs_bulkstat from picking up just reclaimed
286         * inodes that have their in-core state initialized but not flushed
287         * to disk yet. This is a temporary hack that would require a proper
288         * fix in the future.
289         */
290        if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC ||
291            !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) ||
292            !dip->di_core.di_mode)
293                return 0;
294        if (flags & BULKSTAT_FG_QUICK) {
295                *dipp = dip;
296                return 1;
297        }
298        /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
299        aformat = dip->di_core.di_aformat;
300        if ((XFS_DFORK_Q(dip) == 0) ||
301            (aformat == XFS_DINODE_FMT_LOCAL) ||
302            (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
303                *dipp = dip;
304                return 1;
305        }
306        return 1;
307}
308
309#define XFS_BULKSTAT_UBLEFT(ubleft)        ((ubleft) >= statstruct_size)
310
311/*
312 * Return stat information in bulk (by-inode) for the filesystem.
313 */
314int                                        /* error status */
315xfs_bulkstat(
316        xfs_mount_t                *mp,        /* mount point for filesystem */
317        xfs_ino_t                *lastinop, /* last inode returned */
318        int                        *ubcountp, /* size of buffer/count returned */
319        bulkstat_one_pf                formatter, /* func that'd fill a single buf */
320        void                        *private_data,/* private data for formatter */
321        size_t                        statstruct_size, /* sizeof struct filling */
322        char                        __user *ubuffer, /* buffer with inode stats */
323        int                        flags,        /* defined in xfs_itable.h */
324        int                        *done)        /* 1 if there are more stats to get */
325{
326        xfs_agblock_t                agbno=0;/* allocation group block number */
327        xfs_buf_t                *agbp;        /* agi header buffer */
328        xfs_agi_t                *agi;        /* agi header data */
329        xfs_agino_t                agino;        /* inode # in allocation group */
330        xfs_agnumber_t                agno;        /* allocation group number */
331        xfs_daddr_t                bno;        /* inode cluster start daddr */
332        int                        chunkidx; /* current index into inode chunk */
333        int                        clustidx; /* current index into inode cluster */
334        xfs_btree_cur_t                *cur;        /* btree cursor for ialloc btree */
335        int                        end_of_ag; /* set if we've seen the ag end */
336        int                        error;        /* error code */
337        int                     fmterror;/* bulkstat formatter result */
338        __int32_t                gcnt;        /* current btree rec's count */
339        xfs_inofree_t                gfree;        /* current btree rec's free mask */
340        xfs_agino_t                gino;        /* current btree rec's start inode */
341        int                        i;        /* loop index */
342        int                        icount;        /* count of inodes good in irbuf */
343        size_t                        irbsize; /* size of irec buffer in bytes */
344        xfs_ino_t                ino;        /* inode number (filesystem) */
345        xfs_inobt_rec_incore_t        *irbp;        /* current irec buffer pointer */
346        xfs_inobt_rec_incore_t        *irbuf;        /* start of irec buffer */
347        xfs_inobt_rec_incore_t        *irbufend; /* end of good irec buffer entries */
348        xfs_ino_t                lastino; /* last inode number returned */
349        int                        nbcluster; /* # of blocks in a cluster */
350        int                        nicluster; /* # of inodes in a cluster */
351        int                        nimask;        /* mask for inode clusters */
352        int                        nirbuf;        /* size of irbuf */
353        int                        rval;        /* return value error code */
354        int                        tmp;        /* result value from btree calls */
355        int                        ubcount; /* size of user's buffer */
356        int                        ubleft;        /* bytes left in user's buffer */
357        char                        __user *ubufp;        /* pointer into user's buffer */
358        int                        ubelem;        /* spaces used in user's buffer */
359        int                        ubused;        /* bytes used by formatter */
360        xfs_buf_t                *bp;        /* ptr to on-disk inode cluster buf */
361        xfs_dinode_t                *dip;        /* ptr into bp for specific inode */
362        xfs_inode_t                *ip;        /* ptr to in-core inode struct */
363
364        /*
365         * Get the last inode value, see if there's nothing to do.
366         */
367        ino = (xfs_ino_t)*lastinop;
368        lastino = ino;
369        dip = NULL;
370        agno = XFS_INO_TO_AGNO(mp, ino);
371        agino = XFS_INO_TO_AGINO(mp, ino);
372        if (agno >= mp->m_sb.sb_agcount ||
373            ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
374                *done = 1;
375                *ubcountp = 0;
376                return 0;
377        }
378        if (!ubcountp || *ubcountp <= 0) {
379                return EINVAL;
380        }
381        ubcount = *ubcountp; /* statstruct's */
382        ubleft = ubcount * statstruct_size; /* bytes */
383        *ubcountp = ubelem = 0;
384        *done = 0;
385        fmterror = 0;
386        ubufp = ubuffer;
387        nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
388                mp->m_sb.sb_inopblock :
389                (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
390        nimask = ~(nicluster - 1);
391        nbcluster = nicluster >> mp->m_sb.sb_inopblog;
392        irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
393                                   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
394        nirbuf = irbsize / sizeof(*irbuf);
395
396        /*
397         * Loop over the allocation groups, starting from the last
398         * inode returned; 0 means start of the allocation group.
399         */
400        rval = 0;
401        while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
402                cond_resched();
403                bp = NULL;
404                down_read(&mp->m_peraglock);
405                error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
406                up_read(&mp->m_peraglock);
407                if (error) {
408                        /*
409                         * Skip this allocation group and go to the next one.
410                         */
411                        agno++;
412                        agino = 0;
413                        continue;
414                }
415                agi = XFS_BUF_TO_AGI(agbp);
416                /*
417                 * Allocate and initialize a btree cursor for ialloc btree.
418                 */
419                cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
420                                                (xfs_inode_t *)0, 0);
421                irbp = irbuf;
422                irbufend = irbuf + nirbuf;
423                end_of_ag = 0;
424                /*
425                 * If we're returning in the middle of an allocation group,
426                 * we need to get the remainder of the chunk we're in.
427                 */
428                if (agino > 0) {
429                        /*
430                         * Lookup the inode chunk that this inode lives in.
431                         */
432                        error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
433                        if (!error &&        /* no I/O error */
434                            tmp &&        /* lookup succeeded */
435                                        /* got the record, should always work */
436                            !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
437                                    &gfree, &i)) &&
438                            i == 1 &&
439                                        /* this is the right chunk */
440                            agino < gino + XFS_INODES_PER_CHUNK &&
441                                        /* lastino was not last in chunk */
442                            (chunkidx = agino - gino + 1) <
443                                    XFS_INODES_PER_CHUNK &&
444                                        /* there are some left allocated */
445                            XFS_INOBT_MASKN(chunkidx,
446                                    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
447                                /*
448                                 * Grab the chunk record.  Mark all the
449                                 * uninteresting inodes (because they're
450                                 * before our start point) free.
451                                 */
452                                for (i = 0; i < chunkidx; i++) {
453                                        if (XFS_INOBT_MASK(i) & ~gfree)
454                                                gcnt++;
455                                }
456                                gfree |= XFS_INOBT_MASKN(0, chunkidx);
457                                irbp->ir_startino = gino;
458                                irbp->ir_freecount = gcnt;
459                                irbp->ir_free = gfree;
460                                irbp++;
461                                agino = gino + XFS_INODES_PER_CHUNK;
462                                icount = XFS_INODES_PER_CHUNK - gcnt;
463                        } else {
464                                /*
465                                 * If any of those tests failed, bump the
466                                 * inode number (just in case).
467                                 */
468                                agino++;
469                                icount = 0;
470                        }
471                        /*
472                         * In any case, increment to the next record.
473                         */
474                        if (!error)
475                                error = xfs_inobt_increment(cur, 0, &tmp);
476                } else {
477                        /*
478                         * Start of ag.  Lookup the first inode chunk.
479                         */
480                        error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
481                        icount = 0;
482                }
483                /*
484                 * Loop through inode btree records in this ag,
485                 * until we run out of inodes or space in the buffer.
486                 */
487                while (irbp < irbufend && icount < ubcount) {
488                        /*
489                         * Loop as long as we're unable to read the
490                         * inode btree.
491                         */
492                        while (error) {
493                                agino += XFS_INODES_PER_CHUNK;
494                                if (XFS_AGINO_TO_AGBNO(mp, agino) >=
495                                                be32_to_cpu(agi->agi_length))
496                                        break;
497                                error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
498                                                            &tmp);
499                                cond_resched();
500                        }
501                        /*
502                         * If ran off the end of the ag either with an error,
503                         * or the normal way, set end and stop collecting.
504                         */
505                        if (error ||
506                            (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
507                                    &gfree, &i)) ||
508                            i == 0) {
509                                end_of_ag = 1;
510                                break;
511                        }
512                        /*
513                         * If this chunk has any allocated inodes, save it.
514                         * Also start read-ahead now for this chunk.
515                         */
516                        if (gcnt < XFS_INODES_PER_CHUNK) {
517                                /*
518                                 * Loop over all clusters in the next chunk.
519                                 * Do a readahead if there are any allocated
520                                 * inodes in that cluster.
521                                 */
522                                for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
523                                     chunkidx = 0;
524                                     chunkidx < XFS_INODES_PER_CHUNK;
525                                     chunkidx += nicluster,
526                                     agbno += nbcluster) {
527                                        if (XFS_INOBT_MASKN(chunkidx,
528                                                            nicluster) & ~gfree)
529                                                xfs_btree_reada_bufs(mp, agno,
530                                                        agbno, nbcluster);
531                                }
532                                irbp->ir_startino = gino;
533                                irbp->ir_freecount = gcnt;
534                                irbp->ir_free = gfree;
535                                irbp++;
536                                icount += XFS_INODES_PER_CHUNK - gcnt;
537                        }
538                        /*
539                         * Set agino to after this chunk and bump the cursor.
540                         */
541                        agino = gino + XFS_INODES_PER_CHUNK;
542                        error = xfs_inobt_increment(cur, 0, &tmp);
543                        cond_resched();
544                }
545                /*
546                 * Drop the btree buffers and the agi buffer.
547                 * We can't hold any of the locks these represent
548                 * when calling iget.
549                 */
550                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
551                xfs_buf_relse(agbp);
552                /*
553                 * Now format all the good inodes into the user's buffer.
554                 */
555                irbufend = irbp;
556                for (irbp = irbuf;
557                     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
558                        /*
559                         * Now process this chunk of inodes.
560                         */
561                        for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
562                             XFS_BULKSTAT_UBLEFT(ubleft) &&
563                                irbp->ir_freecount < XFS_INODES_PER_CHUNK;
564                             chunkidx++, clustidx++, agino++) {
565                                ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
566                                /*
567                                 * Recompute agbno if this is the
568                                 * first inode of the cluster.
569                                 *
570                                 * Careful with clustidx.   There can be
571                                 * multple clusters per chunk, a single
572                                 * cluster per chunk or a cluster that has
573                                 * inodes represented from several different
574                                 * chunks (if blocksize is large).
575                                 *
576                                 * Because of this, the starting clustidx is
577                                 * initialized to zero in this loop but must
578                                 * later be reset after reading in the cluster
579                                 * buffer.
580                                 */
581                                if ((chunkidx & (nicluster - 1)) == 0) {
582                                        agbno = XFS_AGINO_TO_AGBNO(mp,
583                                                        irbp->ir_startino) +
584                                                ((chunkidx & nimask) >>
585                                                 mp->m_sb.sb_inopblog);
586
587                                        if (flags & (BULKSTAT_FG_QUICK |
588                                                     BULKSTAT_FG_INLINE)) {
589                                                ino = XFS_AGINO_TO_INO(mp, agno,
590                                                                       agino);
591                                                bno = XFS_AGB_TO_DADDR(mp, agno,
592                                                                       agbno);
593
594                                                /*
595                                                 * Get the inode cluster buffer
596                                                 */
597                                                ASSERT(xfs_inode_zone != NULL);
598                                                ip = kmem_zone_zalloc(xfs_inode_zone,
599                                                                      KM_SLEEP);
600                                                ip->i_ino = ino;
601                                                ip->i_mount = mp;
602                                                spin_lock_init(&ip->i_flags_lock);
603                                                if (bp)
604                                                        xfs_buf_relse(bp);
605                                                error = xfs_itobp(mp, NULL, ip,
606                                                                &dip, &bp, bno,
607                                                                XFS_IMAP_BULKSTAT,
608                                                                XFS_BUF_LOCK);
609                                                if (!error)
610                                                        clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
611                                                kmem_zone_free(xfs_inode_zone, ip);
612                                                if (XFS_TEST_ERROR(error != 0,
613                                                                   mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
614                                                                   XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
615                                                        bp = NULL;
616                                                        ubleft = 0;
617                                                        rval = error;
618                                                        break;
619                                                }
620                                        }
621                                }
622                                ino = XFS_AGINO_TO_INO(mp, agno, agino);
623                                bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
624                                /*
625                                 * Skip if this inode is free.
626                                 */
627                                if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
628                                        lastino = ino;
629                                        continue;
630                                }
631                                /*
632                                 * Count used inodes as free so we can tell
633                                 * when the chunk is used up.
634                                 */
635                                irbp->ir_freecount++;
636                                if (!xfs_bulkstat_use_dinode(mp, flags, bp,
637                                                             clustidx, &dip)) {
638                                        lastino = ino;
639                                        continue;
640                                }
641                                /*
642                                 * If we need to do an iget, cannot hold bp.
643                                 * Drop it, until starting the next cluster.
644                                 */
645                                if ((flags & BULKSTAT_FG_INLINE) && !dip) {
646                                        if (bp)
647                                                xfs_buf_relse(bp);
648                                        bp = NULL;
649                                }
650
651                                /*
652                                 * Get the inode and fill in a single buffer.
653                                 * BULKSTAT_FG_QUICK uses dip to fill it in.
654                                 * BULKSTAT_FG_IGET uses igets.
655                                 * BULKSTAT_FG_INLINE uses dip if we have an
656                                 * inline attr fork, else igets.
657                                 * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
658                                 * This is also used to count inodes/blks, etc
659                                 * in xfs_qm_quotacheck.
660                                 */
661                                ubused = statstruct_size;
662                                error = formatter(mp, ino, ubufp,
663                                                ubleft, private_data,
664                                                bno, &ubused, dip, &fmterror);
665                                if (fmterror == BULKSTAT_RV_NOTHING) {
666                                        if (error && error != ENOENT &&
667                                                error != EINVAL) {
668                                                ubleft = 0;
669                                                rval = error;
670                                                break;
671                                        }
672                                        lastino = ino;
673                                        continue;
674                                }
675                                if (fmterror == BULKSTAT_RV_GIVEUP) {
676                                        ubleft = 0;
677                                        ASSERT(error);
678                                        rval = error;
679                                        break;
680                                }
681                                if (ubufp)
682                                        ubufp += ubused;
683                                ubleft -= ubused;
684                                ubelem++;
685                                lastino = ino;
686                        }
687
688                        cond_resched();
689                }
690
691                if (bp)
692                        xfs_buf_relse(bp);
693
694                /*
695                 * Set up for the next loop iteration.
696                 */
697                if (XFS_BULKSTAT_UBLEFT(ubleft)) {
698                        if (end_of_ag) {
699                                agno++;
700                                agino = 0;
701                        } else
702                                agino = XFS_INO_TO_AGINO(mp, lastino);
703                } else
704                        break;
705        }
706        /*
707         * Done, we're either out of filesystem or space to put the data.
708         */
709        kmem_free(irbuf);
710        *ubcountp = ubelem;
711        /*
712         * Found some inodes, return them now and return the error next time.
713         */
714        if (ubelem)
715                rval = 0;
716        if (agno >= mp->m_sb.sb_agcount) {
717                /*
718                 * If we ran out of filesystem, mark lastino as off
719                 * the end of the filesystem, so the next call
720                 * will return immediately.
721                 */
722                *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
723                *done = 1;
724        } else
725                *lastinop = (xfs_ino_t)lastino;
726
727        return rval;
728}
729
730/*
731 * Return stat information in bulk (by-inode) for the filesystem.
732 * Special case for non-sequential one inode bulkstat.
733 */
734int                                        /* error status */
735xfs_bulkstat_single(
736        xfs_mount_t                *mp,        /* mount point for filesystem */
737        xfs_ino_t                *lastinop, /* inode to return */
738        char                        __user *buffer, /* buffer with inode stats */
739        int                        *done)        /* 1 if there are more stats to get */
740{
741        int                        count;        /* count value for bulkstat call */
742        int                        error;        /* return value */
743        xfs_ino_t                ino;        /* filesystem inode number */
744        int                        res;        /* result from bs1 */
745
746        /*
747         * note that requesting valid inode numbers which are not allocated
748         * to inodes will most likely cause xfs_itobp to generate warning
749         * messages about bad magic numbers. This is ok. The fact that
750         * the inode isn't actually an inode is handled by the
751         * error check below. Done this way to make the usual case faster
752         * at the expense of the error case.
753         */
754
755        ino = (xfs_ino_t)*lastinop;
756        error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
757                                 NULL, 0, NULL, NULL, &res);
758        if (error) {
759                /*
760                 * Special case way failed, do it the "long" way
761                 * to see if that works.
762                 */
763                (*lastinop)--;
764                count = 1;
765                if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
766                                NULL, sizeof(xfs_bstat_t), buffer,
767                                BULKSTAT_FG_IGET, done))
768                        return error;
769                if (count == 0 || (xfs_ino_t)*lastinop != ino)
770                        return error == EFSCORRUPTED ?
771                                XFS_ERROR(EINVAL) : error;
772                else
773                        return 0;
774        }
775        *done = 0;
776        return 0;
777}
778
779int
780xfs_inumbers_fmt(
781        void                        __user *ubuffer, /* buffer to write to */
782        const xfs_inogrp_t        *buffer,        /* buffer to read from */
783        long                        count,                /* # of elements to read */
784        long                        *written)        /* # of bytes written */
785{
786        if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
787                return -EFAULT;
788        *written = count * sizeof(*buffer);
789        return 0;
790}
791
792/*
793 * Return inode number table for the filesystem.
794 */
795int                                        /* error status */
796xfs_inumbers(
797        xfs_mount_t        *mp,                /* mount point for filesystem */
798        xfs_ino_t        *lastino,        /* last inode returned */
799        int                *count,                /* size of buffer/count returned */
800        void                __user *ubuffer,/* buffer with inode descriptions */
801        inumbers_fmt_pf        formatter)
802{
803        xfs_buf_t        *agbp;
804        xfs_agino_t        agino;
805        xfs_agnumber_t        agno;
806        int                bcount;
807        xfs_inogrp_t        *buffer;
808        int                bufidx;
809        xfs_btree_cur_t        *cur;
810        int                error;
811        __int32_t        gcnt;
812        xfs_inofree_t        gfree;
813        xfs_agino_t        gino;
814        int                i;
815        xfs_ino_t        ino;
816        int                left;
817        int                tmp;
818
819        ino = (xfs_ino_t)*lastino;
820        agno = XFS_INO_TO_AGNO(mp, ino);
821        agino = XFS_INO_TO_AGINO(mp, ino);
822        left = *count;
823        *count = 0;
824        bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
825        buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
826        error = bufidx = 0;
827        cur = NULL;
828        agbp = NULL;
829        while (left > 0 && agno < mp->m_sb.sb_agcount) {
830                if (agbp == NULL) {
831                        down_read(&mp->m_peraglock);
832                        error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
833                        up_read(&mp->m_peraglock);
834                        if (error) {
835                                /*
836                                 * If we can't read the AGI of this ag,
837                                 * then just skip to the next one.
838                                 */
839                                ASSERT(cur == NULL);
840                                agbp = NULL;
841                                agno++;
842                                agino = 0;
843                                continue;
844                        }
845                        cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
846                                XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
847                        error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
848                        if (error) {
849                                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
850                                cur = NULL;
851                                xfs_buf_relse(agbp);
852                                agbp = NULL;
853                                /*
854                                 * Move up the last inode in the current
855                                 * chunk.  The lookup_ge will always get
856                                 * us the first inode in the next chunk.
857                                 */
858                                agino += XFS_INODES_PER_CHUNK - 1;
859                                continue;
860                        }
861                }
862                if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
863                        &i)) ||
864                    i == 0) {
865                        xfs_buf_relse(agbp);
866                        agbp = NULL;
867                        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
868                        cur = NULL;
869                        agno++;
870                        agino = 0;
871                        continue;
872                }
873                agino = gino + XFS_INODES_PER_CHUNK - 1;
874                buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
875                buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
876                buffer[bufidx].xi_allocmask = ~gfree;
877                bufidx++;
878                left--;
879                if (bufidx == bcount) {
880                        long written;
881                        if (formatter(ubuffer, buffer, bufidx, &written)) {
882                                error = XFS_ERROR(EFAULT);
883                                break;
884                        }
885                        ubuffer += written;
886                        *count += bufidx;
887                        bufidx = 0;
888                }
889                if (left) {
890                        error = xfs_inobt_increment(cur, 0, &tmp);
891                        if (error) {
892                                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
893                                cur = NULL;
894                                xfs_buf_relse(agbp);
895                                agbp = NULL;
896                                /*
897                                 * The agino value has already been bumped.
898                                 * Just try to skip up to it.
899                                 */
900                                agino += XFS_INODES_PER_CHUNK;
901                                continue;
902                        }
903                }
904        }
905        if (!error) {
906                if (bufidx) {
907                        long written;
908                        if (formatter(ubuffer, buffer, bufidx, &written))
909                                error = XFS_ERROR(EFAULT);
910                        else
911                                *count += bufidx;
912                }
913                *lastino = XFS_AGINO_TO_INO(mp, agno, agino);
914        }
915        kmem_free(buffer);
916        if (cur)
917                xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
918                                           XFS_BTREE_NOERROR));
919        if (agbp)
920                xfs_buf_relse(agbp);
921        return error;
922}