rgrp.c 68.5 KB
Newer Older
David Teigland's avatar
David Teigland committed
1
2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
David Teigland's avatar
David Teigland committed
4
5
6
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
7
 * of the GNU General Public License version 2.
David Teigland's avatar
David Teigland committed
8
9
 */

10
11
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

David Teigland's avatar
David Teigland committed
12
13
14
15
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
16
#include <linux/fs.h>
17
#include <linux/gfs2_ondisk.h>
18
#include <linux/prefetch.h>
19
#include <linux/blkdev.h>
20
#include <linux/rbtree.h>
Steven Whitehouse's avatar
Steven Whitehouse committed
21
#include <linux/random.h>
David Teigland's avatar
David Teigland committed
22
23

#include "gfs2.h"
24
#include "incore.h"
David Teigland's avatar
David Teigland committed
25
26
27
28
29
30
31
32
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
33
#include "util.h"
34
#include "log.h"
35
#include "inode.h"
Steven Whitehouse's avatar
Steven Whitehouse committed
36
#include "trace_gfs2.h"
David Teigland's avatar
David Teigland committed
37

Steven Whitehouse's avatar
Steven Whitehouse committed
38
#define BFITNOENT ((u32)~0)
39
#define NO_BLOCK ((u64)~0)
40

41
42
43
44
45
46
47
48
49
50
#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

51
52
53
/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation.  Each block is represented by two
54
55
56
57
58
59
 * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
60
61
 */

62
63
64
65
66
/* A run of blocks within a resource group: start position plus length. */
struct gfs2_extent {
	struct gfs2_rbm rbm;	/* start of the extent (rgrp/bitmap/offset) */
	u32 len;		/* number of blocks in the extent */
};

67
68
/*
 * Legal two-bit block state transitions, indexed by new_state * 4 + cur_state.
 * A zero entry means the transition is invalid and indicates fs corruption
 * (see gfs2_setbit).  States: 0=free, 1=data, 2=unlinked inode, 3=metadata.
 */
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);
77
78


79
80
/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 * Validates the transition against valid_change[]; an illegal transition
 * marks the rgrp inconsistent instead of corrupting the bitmap further.
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	/* Each block is 2 bits; compute the bit position within its byte. */
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		/* Illegal state change: dump diagnostics and flag the rgrp
		   as inconsistent rather than writing a bogus bitmap. */
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	/* XOR with (cur ^ new) flips exactly the bits that differ. */
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}

/**
 * gfs2_testbit - test a bit in the bitmaps
124
 * @rbm: The bit to test
125
 *
126
 * Returns: The two bit block state of the requested bit
127
128
 */

129
static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
130
{
Bob Peterson's avatar
Bob Peterson committed
131
132
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
133
	const u8 *byte;
134
135
	unsigned int bit;

136
137
	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
138

139
	return (*byte >> bit) & GFS2_BIT_MASK;
140
141
}

142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for, this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then and it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	/* search[state] is the bitwise complement of state repeated 32x,
	   so XOR yields all-ones in each two-bit slot that matches. */
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);	/* both bits of a slot must match */
	tmp &= mask;		/* keep only even bit positions in range */
	return tmp;
}

Bob Peterson's avatar
Bob Peterson committed
175
176
177
178
179
180
181
182
183
184
185
186
/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	const u64 rs_start = gfs2_rbm_to_block(&rs->rs_rbm);
	const u64 rs_end = rs_start + rs->rs_free;	/* exclusive */

	if (blk + len - 1 < rs_start)
		return -1;
	if (blk >= rs_end)
		return 1;
	return 0;
}

196
197
198
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but is it assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is no aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	/* Bit position of @goal within its u64 word (2 bits per block). */
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	/* goal >> 5: 32 blocks per u64 word. */
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	/* A block below rd_data0 underflows to a huge value, caught here. */
	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}

283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 */

static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
{
	/* Still room in the current bitmap block? */
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) {
		rbm->offset++;
		return false;
	}
	/* Already on the last bitmap: cannot advance further. */
	if (rbm->bii == rbm->rgd->rd_length - 1)
		return true;
	/* Step into the next bitmap block. */
	rbm->bii++;
	rbm->offset = 0;
	return false;
}

309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u32 i;

	for (i = 0; i < n_unaligned; i++) {
		/* Stop at the first block that is not free. */
		if (gfs2_testbit(rbm) != GFS2_BLKST_FREE)
			return true;
		/* Stop once the requested length is exhausted. */
		if (--(*len) == 0)
			return true;
		/* Stop if we run off the end of the rgrp. */
		if (gfs2_rbm_incr(rbm))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;	/* blocks before byte boundary */
	u32 size = len;				/* remember original request */
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	/* Step block-by-block up to the next byte boundary. */
	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;
		end = start + bi->bi_bh->b_size;
		start += bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		/* A zero byte == 4 free blocks; scan for first non-zero. */
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			/* Ran past the end of the rgrp. */
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			/* Hit a non-free block; check its sub-byte blocks. */
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;	/* blocks actually found free */
}

403
404
/**
 * gfs2_bitcount - count the number of bits in a certain state
405
 * @rgd: the resource group descriptor
406
407
408
409
410
411
412
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

413
414
static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
415
{
416
417
418
419
420
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
421
	u32 count = 0;
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}

David Teigland's avatar
David Teigland committed
437
438
439
440
441
442
443
444
445
446
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 * Recounts all four block states from the bitmaps and cross-checks them
 * against the counters stored in the rgrp header.  On mismatch the rgrp
 * is marked inconsistent and an error is logged.
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	/* Free blocks must match the header's free count. */
	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch:  %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	/* Used data = total - free - dinodes. */
	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch:  %u != %u\n",
			       count[1], tmp);
		return;
	}

	/* Unlinked + used metadata together account for the dinodes. */
	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Walks the rindex rbtree under sd_rindex_spin.  With @exact false, a
 * block that falls between an rgrp's address and its first data block
 * (i.e. within the rgrp's header/bitmap blocks) still resolves to that
 * rgrp; with @exact true such a block returns NULL.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			/* blk falls within this rgrp's span. */
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
535
536
537
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

538
	spin_lock(&sdp->sd_rindex_spin);
539
540
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
541
	spin_unlock(&sdp->sd_rindex_spin);
542
543

	return rgd;
David Teigland's avatar
David Teigland committed
544
545
546
547
}

/**
 * gfs2_rgrpd_get_next - get the next RG
548
 * @rgd: the resource group descriptor
David Teigland's avatar
David Teigland committed
549
550
551
552
553
554
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
555
556
557
558
559
560
561
562
563
564
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
David Teigland's avatar
David Teigland committed
565
		return NULL;
566
567
568
569
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
David Teigland's avatar
David Teigland committed
570
571
}

572
573
574
575
576
577
578
/* Reset the inode's allocation goal to its own address if the current
 * goal is unset or no longer points inside any resource group. */
void check_and_update_goal(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	if (ip->i_goal == 0 || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
}

579
580
581
582
583
584
585
586
587
588
589
/* Release every per-bitmap clone buffer attached to @rgd. */
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int i;

	for (i = 0; i < rgd->rd_length; i++) {
		struct gfs2_bitmap *bi = &rgd->rd_bits[i];

		kfree(bi->bi_clone);
		bi->bi_clone = NULL;	/* guard against double free */
	}
}

590
/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                 plus a quota allocations data structure, if necessary
 * @ip: the inode for this reservation
 *
 * Returns: 0 on success, or the error from gfs2_qa_alloc()
 */
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
	return gfs2_qa_alloc(ip);
}

600
/* Print one block reservation (inode number, start block, bitmap offset,
 * free count) to @seq, for debugfs/glock dump output. */
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

608
/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 * Caller must hold rd_rsspin.  Unhooks @rs from the rgrp's reservation
 * rbtree and returns its unused blocks to the rgrp's accounting.
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		/* This bitmap may have free space again. */
		clear_bit(GBF_FULL, &bi->bi_flags);
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
646
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson's avatar
Bob Peterson committed
647
648
649
{
	struct gfs2_rgrpd *rgd;

650
651
652
	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
653
		__rs_deltree(rs);
654
		BUG_ON(rs->rs_free);
655
656
		spin_unlock(&rgd->rd_rsspin);
	}
Bob Peterson's avatar
Bob Peterson committed
657
658
659
}

/**
660
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
661
 * @ip: The inode for this reservation
662
 * @wcount: The inode's write count, or NULL
663
664
 *
 */
665
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
666
667
{
	down_write(&ip->i_rw_mutex);
668
	if ((wcount == NULL) || (atomic_read(wcount) <= 1))
669
		gfs2_rs_deltree(&ip->i_res);
670
	up_write(&ip->i_rw_mutex);
671
	gfs2_qa_delete(ip, wcount);
672
673
}

Bob Peterson's avatar
Bob Peterson committed
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leave the reservation structures in tact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
690
		__rs_deltree(rs);
Bob Peterson's avatar
Bob Peterson committed
691
692
693
694
	}
	spin_unlock(&rgd->rd_rsspin);
}

695
/* Tear down every resource group descriptor at unmount: detach from the
 * rindex tree, release the rgrp glock, free clone bitmaps, bitmap
 * descriptors, outstanding reservations and finally the rgrpd itself. */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			/* Break the glock->rgd link before dropping the
			   glock so no one can reach a freed rgd. */
			glock_clear_object(gl, rgd);
			gfs2_rgrp_brelse(rgd);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		rgd->rd_bits = NULL;
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

721
722
/* Log the on-disk rindex fields of @rgd, used when reporting a
 * corrupt/inconsistent resource group (see compute_bitstructs). */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

David Teigland's avatar
David Teigland committed
730
731
732
733
734
735
736
737
738
739
740
741
742
/**
 * gfs2_compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	/* All bitmap bytes must be accounted for exactly. */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/* The last bitmap must end exactly at the rgrp's data block count. */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

811
812
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 * Walks every rindex entry and sums its ri_data (data blocks per rgrp).
 *
 * Returns: the total number of data blocks described by the rindex
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	u64 total_data = 0;
	int rgrps;

	for (rgrps = 0; ; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		/* Stop at a short or partial trailing entry. */
		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		if (gfs2_internal_read(ip, buf, &pos,
				       sizeof(struct gfs2_rindex)) !=
		    sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

Bob Peterson's avatar
Bob Peterson committed
838
static int rgd_insert(struct gfs2_rgrpd *rgd)
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
Bob Peterson's avatar
Bob Peterson committed
854
			return -EEXIST;
855
856
857
858
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
Bob Peterson's avatar
Bob Peterson committed
859
860
	sdp->sd_rgrps++;
	return 0;
861
862
}

David Teigland's avatar
David Teigland committed
863
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	/* sd_rgrps counts entries already read; that is our file offset. */
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error) {
		glock_set_object(rgd->rd_gl, rgd);
		/* Page-cache span covered by this rgrp's glock. */
		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
						    rgd->rd_length) * bsize) - 1;
		return 0;
	}

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	rgd->rd_bits = NULL;
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;

	do {
		/* Mark every sd_journals-th rgrp as preferred, stopping
		   when the walk wraps back to the starting rgrp. */
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}

965
966
967
968
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
David Teigland's avatar
David Teigland committed
969
970
971
 * Returns: 0 on successful update, error code otherwise
 */

972
static int gfs2_ri_update(struct gfs2_inode *ip)
David Teigland's avatar
David Teigland committed
973
{
974
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teigland's avatar
David Teigland committed
975
976
	int error;

977
	do {
978
		error = read_rindex_entry(ip);
979
980
981
982
	} while (error == 0);

	if (error < 0)
		return error;
David Teigland's avatar
David Teigland committed
983

984
985
	set_rgrp_preferences(sdp);

986
	sdp->sd_rindex_uptodate = 1;
987
988
	return 0;
}
David Teigland's avatar
David Teigland committed
989
990

/**
991
 * gfs2_rindex_update - Update the rindex if required
David Teigland's avatar
David Teigland committed
992
993
994
995
996
997
998
999
1000
1001
1002
1003
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
1004
 * Returns: 0 on succeess, error code otherwise
David Teigland's avatar
David Teigland committed
1005
1006
 */

1007
int gfs2_rindex_update(struct gfs2_sbd *sdp)
David Teigland's avatar
David Teigland committed
1008
{
1009
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
David Teigland's avatar
David Teigland committed
1010
	struct gfs2_glock *gl = ip->i_gl;
1011
1012
	struct gfs2_holder ri_gh;
	int error = 0;
1013
	int unlock_required = 0;
David Teigland's avatar
David Teigland committed
1014
1015

	/* Read new copy from disk if we don't have the latest */
1016
	if (!sdp->sd_rindex_uptodate) {
1017
1018
1019
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
Bob Peterson's avatar
Bob Peterson committed
1020
				return error;
1021
1022
			unlock_required = 1;
		}
1023
		if (!sdp->sd_rindex_uptodate)
David Teigland's avatar
David Teigland committed
1024
			error = gfs2_ri_update(ip);
1025
1026
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
David Teigland's avatar
David Teigland committed
1027
1028
1029
1030
1031
	}

	return error;
}

1032
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
1033
1034
{
	const struct gfs2_rgrp *str = buf;
1035
	u32 rg_flags;
1036

1037
	rg_flags = be32_to_cpu(str->rg_flags);
1038
	rg_flags &= ~GFS2_RDF_MASK;
1039
1040
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
1041
	rgd->rd_free = be32_to_cpu(str->rg_free);
1042
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
1043
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
1044
1045
}

1046
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
1047
1048
1049
{
	struct gfs2_rgrp *str = buf;

1050
	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
1051
	str->rg_free = cpu_to_be32(rgd->rd_free);
1052
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
1053
	str->__pad = cpu_to_be32(0);
1054
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
1055
1056
1057
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

/* Compare the glock LVB copy of the rgrp header against the on-disk
   buffer. Returns 1 if they agree (LVB usable), 0 if stale. Both sides
   are big-endian, so the fields compare directly without byte-swapping. */
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags)
		return 0;
	if (rgl->rl_free != str->rg_free)
		return 0;
	if (rgl->rl_dinodes != str->rg_dinodes)
		return 0;
	if (rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

/* Refresh the glock LVB from an on-disk rgrp buffer. Fields stay in
   big-endian (disk) order; only the magic is (re)stamped. */
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *rg = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->__pad = 0UL;
	rgl->rl_flags = rg->rg_flags;
	rgl->rl_free = rg->rg_free;
	rgl->rl_dinodes = rg->rg_dinodes;
	rgl->rl_igeneration = rg->rg_igeneration;
}

/* Adjust the LVB's unlinked-inode count by @change (may wrap like the
   original u32 arithmetic). The field is stored big-endian. */
static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;

	rgl->rl_unlinked = cpu_to_be32(be32_to_cpu(rgl->rl_unlinked) + change);
}

/* Walk every bitmap block of @rgd and count blocks in the UNLINKED
   state. The buffers must already be read in and uptodate. */
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	const u32 length = rgd->rd_length;
	struct gfs2_bitmap *bi = rgd->rd_bits;
	u32 count = 0;
	u32 i;

	for (i = 0; i < length; i++, bi++) {
		const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
		u32 goal = 0;

		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


David Teigland's avatar
David Teigland committed
1114
/**
1115
1116
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
David Teigland's avatar
David Teigland committed
1117
1118
 *
 * Read in all of a Resource Group's header and bitmap blocks.
1119
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
David Teigland's avatar
David Teigland committed
1120
1121
1122
1123
 *
 * Returns: errno
 */

1124
static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
David Teigland's avatar
David Teigland committed
1125
1126
1127
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
1128
	unsigned int length = rgd->rd_length;
David Teigland's avatar
David Teigland committed
1129
1130
1131
1132
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

1133
1134
1135
	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

David Teigland's avatar
David Teigland committed
1136
1137
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
1138
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
David Teigland's avatar
David Teigland committed
1139
1140
1141
1142
1143
1144
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
1145
		error = gfs2_meta_wait(sdp, bi->bi_bh);
David Teigland's avatar
David Teigland committed
1146
1147
		if (error)
			goto fail;
1148
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
David Teigland's avatar
David Teigland committed
1149
1150
1151
1152
1153
1154
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

1155
	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
1156
1157
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
1158
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
Steven Whitehouse's avatar