1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_dir2.h"
28#include "xfs_dmapi.h"
29#include "xfs_mount.h"
30#include "xfs_bmap_btree.h"
31#include "xfs_alloc_btree.h"
32#include "xfs_ialloc_btree.h"
33#include "xfs_dir2_sf.h"
34#include "xfs_attr_sf.h"
35#include "xfs_dinode.h"
36#include "xfs_inode.h"
37#include "xfs_ialloc.h"
38#include "xfs_itable.h"
39#include "xfs_error.h"
40#include "xfs_btree.h"
41
42int
43xfs_internal_inum(
44 xfs_mount_t *mp,
45 xfs_ino_t ino)
46{
47 return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
48 (xfs_sb_version_hasquota(&mp->m_sb) &&
49 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
50}
51
/*
 * Fill in a bulkstat buffer for a single inode by bringing it
 * in-core with xfs_iget().  On failure, sets *stat to
 * BULKSTAT_RV_NOTHING and returns the error from xfs_iget();
 * on success returns 0 (*stat is left for the caller to set).
 */
STATIC int
xfs_bulkstat_one_iget(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	xfs_daddr_t	bno,		/* starting bno of inode cluster */
	xfs_bstat_t	*buf,		/* return buffer */
	int		*stat)		/* BULKSTAT_RV_... */
{
	xfs_icdinode_t	*dic;		/* dinode core info pointer */
	xfs_inode_t	*ip;		/* incore inode pointer */
	int		error;

	/* Take the inode in-core with a shared ilock for reading. */
	error = xfs_iget(mp, NULL, ino,
			 XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_blkno != (xfs_daddr_t)0);

	dic = &ip->i_d;

	/*
	 * Copy the in-core dinode fields straight into the bulkstat
	 * buffer; xfs_iget already converted them to host order.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid = dic->di_projid;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	/* atime comes from the VFS inode, not the XFS dinode core */
	vn_atime_to_bstime(VFS_I(ip), &buf->bs_atime);
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;

	/* rdev/blksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		/* include delayed-allocation blocks not yet on disk */
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}

	/* Drop the reference and the shared ilock taken by xfs_iget. */
	xfs_iput(ip, XFS_ILOCK_SHARED);
	return error;
}
123
124STATIC void
125xfs_bulkstat_one_dinode(
126 xfs_mount_t *mp,
127 xfs_ino_t ino,
128 xfs_dinode_t *dip,
129 xfs_bstat_t *buf)
130{
131 xfs_dinode_core_t *dic;
132
133 dic = &dip->di_core;
134
135
136
137
138
139
140
141
142
143
144
145
146 if (dic->di_version == XFS_DINODE_VERSION_1) {
147 buf->bs_nlink = be16_to_cpu(dic->di_onlink);
148 buf->bs_projid = 0;
149 } else {
150 buf->bs_nlink = be32_to_cpu(dic->di_nlink);
151 buf->bs_projid = be16_to_cpu(dic->di_projid);
152 }
153
154 buf->bs_ino = ino;
155 buf->bs_mode = be16_to_cpu(dic->di_mode);
156 buf->bs_uid = be32_to_cpu(dic->di_uid);
157 buf->bs_gid = be32_to_cpu(dic->di_gid);
158 buf->bs_size = be64_to_cpu(dic->di_size);
159 buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
160 buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
161 buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
162 buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
163 buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
164 buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
165 buf->bs_xflags = xfs_dic2xflags(dip);
166 buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
167 buf->bs_extents = be32_to_cpu(dic->di_nextents);
168 buf->bs_gen = be32_to_cpu(dic->di_gen);
169 memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
170 buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
171 buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
172 buf->bs_aextents = be16_to_cpu(dic->di_anextents);
173
174 switch (dic->di_format) {
175 case XFS_DINODE_FMT_DEV:
176 buf->bs_rdev = be32_to_cpu(dip->di_u.di_dev);
177 buf->bs_blksize = BLKDEV_IOSIZE;
178 buf->bs_blocks = 0;
179 break;
180 case XFS_DINODE_FMT_LOCAL:
181 case XFS_DINODE_FMT_UUID:
182 buf->bs_rdev = 0;
183 buf->bs_blksize = mp->m_sb.sb_blocksize;
184 buf->bs_blocks = 0;
185 break;
186 case XFS_DINODE_FMT_EXTENTS:
187 case XFS_DINODE_FMT_BTREE:
188 buf->bs_rdev = 0;
189 buf->bs_blksize = mp->m_sb.sb_blocksize;
190 buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
191 break;
192 }
193}
194
195STATIC int
196xfs_bulkstat_one_fmt(
197 void __user *ubuffer,
198 const xfs_bstat_t *buffer)
199{
200 if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
201 return -EFAULT;
202 return sizeof(*buffer);
203}
204
205
206
207
208
209int
210xfs_bulkstat_one(
211 xfs_mount_t *mp,
212 xfs_ino_t ino,
213 void __user *buffer,
214 int ubsize,
215 void *private_data,
216 xfs_daddr_t bno,
217 int *ubused,
218 void *dibuff,
219 int *stat)
220{
221 xfs_bstat_t *buf;
222 int error = 0;
223 xfs_dinode_t *dip;
224 bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt;
225
226 dip = (xfs_dinode_t *)dibuff;
227 *stat = BULKSTAT_RV_NOTHING;
228
229 if (!buffer || xfs_internal_inum(mp, ino))
230 return XFS_ERROR(EINVAL);
231 if (ubsize < sizeof(*buf))
232 return XFS_ERROR(ENOMEM);
233
234 buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
235
236 if (dip == NULL) {
237
238
239
240 error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
241 if (error)
242 goto out_free;
243 } else {
244 xfs_bulkstat_one_dinode(mp, ino, dip, buf);
245 }
246
247 error = formatter(buffer, buf);
248 if (error < 0) {
249 error = EFAULT;
250 goto out_free;
251 }
252
253 *stat = BULKSTAT_RV_DIDONE;
254 if (ubused)
255 *ubused = error;
256
257 out_free:
258 kmem_free(buf);
259 return error;
260}
261
262
263
264
265
266
/*
 * Test whether the on-disk inode at index clustidx of the cluster
 * buffer can be used directly for bulkstat, avoiding an xfs_iget().
 *
 * Returns 0 when the inode should be skipped entirely (stale or
 * invalid disk contents).  Returns 1 otherwise; in that case *dipp
 * points at the on-disk inode when it is usable as-is, or is left
 * NULL to tell the caller to fall back to the iget path.
 */
STATIC int
xfs_bulkstat_use_dinode(
	xfs_mount_t	*mp,
	int		flags,		/* BULKSTAT_FG_... */
	xfs_buf_t	*bp,		/* inode cluster buffer, may be NULL */
	int		clustidx,	/* inode index within the cluster */
	xfs_dinode_t	**dipp)		/* out: usable on-disk inode or NULL */
{
	xfs_dinode_t	*dip;
	unsigned int	aformat;

	*dipp = NULL;
	/* no buffer, or caller asked for the iget path: fall back */
	if (!bp || (flags & BULKSTAT_FG_IGET))
		return 1;
	dip = (xfs_dinode_t *)
			xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
	/*
	 * Check the buffer containing the on-disk inode for di_mode == 0.
	 * This is to prevent xfs_bulkstat from picking up just-reclaimed
	 * invalid inodes that have the in-core state set to free and
	 * the buffer contents not yet updated.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC ||
	    !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) ||
	    !dip->di_core.di_mode)
		return 0;
	/* BULKSTAT_FG_QUICK: always use the on-disk inode as-is */
	if (flags & BULKSTAT_FG_QUICK) {
		*dipp = dip;
		return 1;
	}
	/* BULKSTAT_FG_INLINE: usable if the attr fork is absent or inline */
	aformat = dip->di_core.di_aformat;
	if ((XFS_DFORK_Q(dip) == 0) ||
	    (aformat == XFS_DINODE_FMT_LOCAL) ||
	    (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) {
		*dipp = dip;
		return 1;
	}
	/*
	 * Attr fork data lives outside the inode; leave *dipp NULL so
	 * the caller takes the iget path for complete information.
	 */
	return 1;
}
308
309#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
310
311
312
313
/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Walks the inode allocation btrees of each AG starting after
 * *lastinop, batching chunk records into irbuf and then formatting
 * each allocated inode into the user's buffer via *formatter.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned; updated */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	void			*private_data,/* private data for formatter */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			flags,	/* defined in xfs_itable.h */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_daddr_t		bno;	/* inode cluster start daddr */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror;/* bulkstat formatter result */
	__int32_t		gcnt;	/* current btree rec's free count */
	xfs_inofree_t		gfree;	/* current btree rec's free mask */
	xfs_agino_t		gino;	/* current btree rec's start inode */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer in entries */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */
	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */
	xfs_dinode_t		*dip;	/* ptr into bp for specific inode */
	xfs_inode_t		*ip;	/* ptr to in-core inode struct */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	dip = NULL;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp;
	ubleft = ubcount * statstruct_size;
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/* # of inodes per cluster buffer, and blocks per cluster */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; agino 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
		down_read(&mp->m_peraglock);
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		up_read(&mp->m_peraglock);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
						(xfs_inode_t *)0, 0);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp);
			if (!error &&	/* lookup succeeded */
			    tmp &&	/* and found a record */
			    /* and the record read back cleanly */
			    !(error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) &&
			    i == 1 &&
			    /* lastino falls inside this chunk */
			    agino < gino + XFS_INODES_PER_CHUNK &&
			    /* lastino is not the chunk's last inode */
			    (chunkidx = agino - gino + 1) <
				    XFS_INODES_PER_CHUNK &&
			    /* some of the remaining inodes are allocated */
			    XFS_INOBT_MASKN(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free -- they are
				 * already reported or not wanted.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~gfree)
						gcnt++;
				}
				gfree |= XFS_INOBT_MASKN(0, chunkidx);
				irbp->ir_startino = gino;
				irbp->ir_freecount = gcnt;
				irbp->ir_free = gfree;
				irbp++;
				agino = gino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - gcnt;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_inobt_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
							    &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error ||
			    (error = xfs_inobt_get_rec(cur, &gino, &gcnt,
				    &gfree, &i)) ||
			    i == 0) {
				end_of_ag = 1;
				break;
			}
			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (gcnt < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				for (agbno = XFS_AGINO_TO_AGBNO(mp, gino),
				     chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (XFS_INOBT_MASKN(chunkidx,
							    nicluster) & ~gfree)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster);
				}
				irbp->ir_startino = gino;
				irbp->ir_freecount = gcnt;
				irbp->ir_free = gfree;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - gcnt;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = gino + XFS_INODES_PER_CHUNK;
			error = xfs_inobt_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.  There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);

					if (flags & (BULKSTAT_FG_QUICK |
						     BULKSTAT_FG_INLINE)) {
						ino = XFS_AGINO_TO_INO(mp, agno,
								       agino);
						bno = XFS_AGB_TO_DADDR(mp, agno,
								       agbno);

						/*
						 * Get the inode cluster buffer
						 * using a throwaway in-core
						 * inode for xfs_itobp.
						 */
						ASSERT(xfs_inode_zone != NULL);
						ip = kmem_zone_zalloc(xfs_inode_zone,
								      KM_SLEEP);
						ip->i_ino = ino;
						ip->i_mount = mp;
						spin_lock_init(&ip->i_flags_lock);
						if (bp)
							xfs_buf_relse(bp);
						error = xfs_itobp(mp, NULL, ip,
								&dip, &bp, bno,
								XFS_IMAP_BULKSTAT,
								XFS_BUF_LOCK);
						if (!error)
							clustidx = ip->i_boffset / mp->m_sb.sb_inodesize;
						kmem_zone_free(xfs_inode_zone, ip);
						if (XFS_TEST_ERROR(error != 0,
								   mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
								   XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
							bp = NULL;
							ubleft = 0;
							rval = error;
							break;
						}
					}
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;
				if (!xfs_bulkstat_use_dinode(mp, flags, bp,
							     clustidx, &dip)) {
					lastino = ino;
					continue;
				}
				/*
				 * If we need to do an iget, cannot hold bp.
				 * Drop it, until starting the next cluster.
				 */
				if ((flags & BULKSTAT_FG_INLINE) && !dip) {
					if (bp)
						xfs_buf_relse(bp);
					bp = NULL;
				}

				/*
				 * Get the inode and fill in a single buffer.
				 * BULKSTAT_FG_QUICK uses dip to fill it in.
				 * BULKSTAT_FG_IGET uses igets.
				 * BULKSTAT_FG_INLINE uses dip if we have an
				 * inline attr fork, else igets.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp,
						ubleft, private_data,
						bno, &ubused, dip, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}
729
730
731
732
733
/*
 * Return stat information for the single inode *lastinop.  Tries
 * the fast xfs_bulkstat_one() path first and falls back to a
 * one-entry xfs_bulkstat() walk if that fails.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to stat; updated */
	char			__user *buffer,	/* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* inode number */
	int			res;	/* result from bulkstat_one */

	/*
	 * Note that requesting valid inode numbers which are not
	 * allocated to inodes will most likely cause xfs_itobp to
	 * generate warning messages about bad magic numbers.  This is
	 * ok.  The fact that the inode isn't actually an inode is
	 * handled by the error check below.  Done this way to make the
	 * usual case faster at the expense of the error case.
	 */
	ino = (xfs_ino_t)*lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
				 NULL, 0, NULL, NULL, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
				NULL, sizeof(xfs_bstat_t), buffer,
				BULKSTAT_FG_IGET, done))
			return error;
		/* the walk must have returned exactly our inode */
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}
778
779int
780xfs_inumbers_fmt(
781 void __user *ubuffer,
782 const xfs_inogrp_t *buffer,
783 long count,
784 long *written)
785{
786 if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
787 return -EFAULT;
788 *written = count * sizeof(*buffer);
789 return 0;
790}
791
792
793
794
/*
 * Return inode allocation summary records (one xfs_inogrp_t per
 * inode chunk) for the filesystem, starting after *lastino and
 * formatted out to user space via *formatter.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned; updated */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;		/* agi header buffer */
	xfs_agino_t	agino;		/* inode # in allocation group */
	xfs_agnumber_t	agno;		/* allocation group number */
	int		bcount;		/* # of elements in kernel buffer */
	xfs_inogrp_t	*buffer;	/* kernel staging buffer */
	int		bufidx;		/* next free slot in buffer */
	xfs_btree_cur_t	*cur;		/* btree cursor for ialloc btree */
	int		error;		/* error code */
	__int32_t	gcnt;		/* current btree rec's free count */
	xfs_inofree_t	gfree;		/* current btree rec's free mask */
	xfs_agino_t	gino;		/* current btree rec's start inode */
	int		i;		/* btree record-valid flag */
	xfs_ino_t	ino;		/* inode number (filesystem) */
	int		left;		/* # of records still wanted */
	int		tmp;		/* result value from btree calls */

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* stage at most one page worth of records at a time */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			down_read(&mp->m_peraglock);
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			up_read(&mp->m_peraglock);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		/* read the record under the cursor; i == 0 means end of ag */
		if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree,
			&i)) ||
		    i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = gino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino);
		buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt;
		buffer[bufidx].xi_allocmask = ~gfree;
		bufidx++;
		left--;
		/* staging buffer full: flush it out to user space */
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_inobt_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	/* flush any partially filled staging buffer */
	if (!error) {
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);
	return error;
}