/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/memblock.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
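
/*
 * Illustrative sketch (not part of the original file): code that needs both
 * the sb list lock and a per-inode lock must follow the ordering above,
 * e.g. the way evict_inodes() below walks s_inodes:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 */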

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}
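
/*
 * Illustrative sketch (not part of the original file): the pattern a
 * filesystem typically plugs into ->alloc_inode, embedding the VFS inode
 * in its own slab object. All examplefs_* names are hypothetical; the core
 * then runs inode_init_always() on the returned inode as above.
 */
struct examplefs_inode_info {
	unsigned long	i_flags;	/* fs-private state */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct kmem_cache *examplefs_inode_cachep;

static struct inode *examplefs_alloc_inode(struct super_block *sb)
{
	struct examplefs_inode_info *ei;

	ei = kmem_cache_alloc(examplefs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}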

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
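
/*
 * Illustrative sketch (not part of the original file): a typical unlink
 * path pairs removal of the directory entry with drop_nlink() on the
 * victim inode; examplefs_* names are hypothetical.
 */
static int examplefs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	/* fs-specific: remove the on-disk directory entry here */
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(dir);
	drop_nlink(inode);	/* may drop to zero -> bumps s_remove_count */
	return 0;
}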

static void __address_space_init_once(struct address_space *mapping)
{
	xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	__address_space_init_once(mapping);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	__address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else
		inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
		inode_lru_list_add(inode);
}


static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
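
/*
 * Illustrative note (not part of the original file): most filesystems reach
 * this through the insert_inode_hash() wrapper in <linux/fs.h>, which keys
 * the inode by its inode number:
 *
 *	static inline void insert_inode_hash(struct inode *inode)
 *	{
 *		__insert_inode_hash(inode, inode->i_ino);
 *	}
 */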

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	/*
	 * We have to cycle the i_pages lock here because reclaim can be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	xa_lock_irq(&inode->i_data.i_pages);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrexceptional);
	xa_unlock_irq(&inode->i_data.i_pages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after the SB_ACTIVE flag has been removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory, check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes, return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/*
	 * Recently referenced inodes and inodes with many attached pages
	 * get one more pass.
	 */
	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		if (unlikely(inode->i_state & I_CREATING)) {
			spin_unlock(&inode->i_lock);
			return ERR_PTR(-ESTALE);
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 *	new_inode_pseudo 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 *	Inode won't be chained in superblock s_inodes list.
 *	This means:
 *	- fs can't be unmounted
 *	- quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
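
/*
 * Illustrative sketch (not part of the original file): pseudo filesystems
 * commonly combine new_inode() with get_next_ino() above, since they have
 * no stable on-disk inode numbers; examplefs_* names are hypothetical.
 */
static struct inode *examplefs_get_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();	/* per-cpu batched counter */
		inode->i_mode = mode;
		inode->i_atime = inode->i_mtime = inode->i_ctime =
			current_time(inode);
	}
	return inode;
}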

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
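
/*
 * Illustrative sketch (not part of the original file): the canonical
 * lookup-or-create pattern around I_NEW. examplefs_read_inode() is a
 * hypothetical helper that fills the inode from backing store.
 */
static struct inode *examplefs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit, already initialised */

	if (examplefs_read_inode(inode)) {
		iget_failed(inode);	/* unhash, mark bad, wake waiters */
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);	/* clear I_NEW, wake waiters */
	return inode;
}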

void discard_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
	iput(inode);
}
EXPORT_SYMBOL(discard_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
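
/*
 * Illustrative sketch (not part of the original file): callers bracket
 * cross-inode data operations (e.g. clone/dedupe ranges) with this pair;
 * examplefs_* names are hypothetical.
 */
static int examplefs_clone_range(struct inode *src, struct inode *dst)
{
	int ret;

	lock_two_nondirectories(src, dst);	/* ordered by address */
	ret = 0;	/* fs-specific copy work goes here */
	unlock_two_nondirectories(src, dst);
	return ret;
}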

/**
 * inode_insert5 - obtain an inode from a mounted file system
 * @inode:	pre-allocated inode to use for insert to cache
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * variant of iget5_locked() for callers that don't want to fail on memory
 * allocation of inode.
 *
 * If the inode is not in cache, insert the pre-allocated inode into the
 * cache and return it locked, hashed, and with the I_NEW flag set. The file
 * system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so
 * they can't sleep.
 */
struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
			    int (*test)(struct inode *, void *),
			    int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
	struct inode *old;
	bool creating = inode->i_state & I_CREATING;

again:
	spin_lock(&inode_hash_lock);
	old = find_inode(inode->i_sb, head, test, data);
	if (unlikely(old)) {
		/*
		 * Uhhuh, somebody else created the same inode under us.
		 * Use the old inode instead of the preallocated one.
		 */
		spin_unlock(&inode_hash_lock);
		if (IS_ERR(old))
			return NULL;
		wait_on_inode(old);
		if (unlikely(inode_unhashed(old))) {
			iput(old);
			goto again;
		}
		return old;
	}

	if (set && unlikely(set(inode, data))) {
		inode = NULL;
		goto unlock;
	}

	/*
	 * Return the locked inode with I_NEW set, the
	 * caller is responsible for filling in the contents
	 */
	spin_lock(&inode->i_lock);
	inode->i_state |= I_NEW;
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode->i_lock);
	if (!creating)
		inode_sb_list_add(inode);
unlock:
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(inode_insert5);
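
/*
 * Illustrative sketch (not part of the original file): the @test/@set
 * callbacks a filesystem might pair with inode_insert5() or iget5_locked()
 * when inodes are keyed by more than i_ino. EXAMPLEFS_I() and the objectid
 * field are hypothetical; note neither callback may sleep, as both run
 * under inode_hash_lock.
 */
static int examplefs_test(struct inode *inode, void *data)
{
	return EXAMPLEFS_I(inode)->objectid == *(u64 *)data;
}

static int examplefs_set(struct inode *inode, void *data)
{
	EXAMPLEFS_I(inode)->objectid = *(u64 *)data;
	return 0;
}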
