/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>

static const struct file_operations fuse_direct_io_file_operations;

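/*
 * Send FUSE_OPEN or FUSE_OPENDIR for the given nodeid and collect the
 * server's fuse_open_out reply (file handle and open flags).  O_CREAT,
 * O_EXCL and O_NOCTTY are handled by the kernel, so they are masked out
 * before the flags are passed to userspace.
 */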
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	struct fuse_req *req;
	int err;

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outargp);
	req->out.args[0].value = outargp;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	return err;
}

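/*
 * Allocate a fuse_file together with the request that is reserved for
 * sending its eventual RELEASE, so that release cannot fail on OOM.
 */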
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc(0);
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}

static void fuse_release_async(struct work_struct *work)
{
	struct fuse_req *req;
	struct fuse_conn *fc;
	struct path path;

	req = container_of(work, struct fuse_req, misc.release.work);
	path = req->misc.release.path;
	fc = get_fuse_conn(path.dentry->d_inode);

	fuse_put_request(fc, req);
	path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	if (fc->destroy_req) {
		/*
		 * If this is a fuseblk mount, then it's possible that
		 * releasing the path will result in releasing the
		 * super block and sending the DESTROY request.  If
		 * the server is single threaded, this would hang.
		 * For this reason do the path_put() in a separate
		 * thread.
		 */
		atomic_inc(&req->count);
		INIT_WORK(&req->misc.release.work, fuse_release_async);
		schedule_work(&req->misc.release.work);
	} else {
		path_put(&req->misc.release.path);
	}
}

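/*
 * Drop a reference to the file.  The last reference sends the RELEASE
 * request prepared in ff->reserved_req: synchronously if the caller can
 * wait for the answer, otherwise in the background with fuse_release_end()
 * doing the final path_put().
 */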
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		if (sync) {
			req->background = 0;
			fuse_request_send(ff->fc, req);
			path_put(&req->misc.release.path);
			fuse_put_request(ff->fc, req);
		} else {
			req->end = fuse_release_end;
			req->background = 1;
			fuse_request_send_background(ff->fc, req);
		}
		kfree(ff);
	}
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_open_out outarg;
	struct fuse_file *ff;
	int err;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fc);
	if (!ff)
		return -ENOMEM;

	err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
	if (err) {
		fuse_file_free(ff);
		return err;
	}

	if (isdir)
		outarg.open_flags &= ~FOPEN_DIRECT_IO;

	ff->fh = outarg.fh;
	ff->nodeid = nodeid;
	ff->open_flags = outarg.open_flags;
	file->private_data = fuse_file_get(ff);

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	err = fuse_do_open(fc, get_node_id(inode), file, isdir);
	if (err)
		return err;

	fuse_finish_open(inode, file);

	return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
	struct fuse_conn *fc = ff->fc;
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release.in;

	spin_lock(&fc->lock);
	list_del(&ff->write_entry);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
	struct fuse_file *ff;
	struct fuse_req *req;

	ff = file->private_data;
	if (unlikely(!ff))
		return;

	req = ff->reserved_req;
	fuse_prepare_release(ff, file->f_flags, opcode);

	if (ff->flock) {
		struct fuse_release_in *inarg = &req->misc.release.in;
		inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		inarg->lock_owner = fuse_lock_owner_id(ff->fc,
						       (fl_owner_t) file);
	}
	/* Hold vfsmount and dentry until release is finished */
	path_get(&file->f_path);
	req->misc.release.path = file->f_path;

	/*
	 * Normally this will send the RELEASE request, however if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount,
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	fuse_release_common(file, FUSE_RELEASE);

	/* return value is ignored by VFS */
	return 0;
}

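/* Send RELEASE and wait for the answer; the caller holds the last reference */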
void fuse_sync_release(struct fuse_file *ff, int flags)
{
	WARN_ON(atomic_read(&ff->count) > 1);
	fuse_prepare_release(ff, flags, FUSE_RELEASE);
	ff->reserved_req->force = 1;
	ff->reserved_req->background = 0;
	fuse_request_send(ff->fc, ff->reserved_req);
	fuse_put_request(ff->fc, ff->reserved_req);
	kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

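	/* 32 XTEA rounds; 0x9E3779B9 is the standard TEA/XTEA key schedule constant */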
	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_req *req;
	bool found = false;

	spin_lock(&fc->lock);
	list_for_each_entry(req, &fi->writepages, writepages_entry) {
		pgoff_t curr_index;

		BUG_ON(req->inode != inode);
		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
		if (curr_index == index) {
			found = true;
			break;
		}
	}
	spin_unlock(&fc->lock);

	return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
	return 0;
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail_nopages(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	mutex_lock(&inode->i_mutex);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	req = fuse_get_req_nopages(fc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	return fuse_fsync_common(file, start, end, datasync, 0);
}

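/*
 * Fill in a READ (or READDIR) request for 'count' bytes at offset 'pos'.
 * out.argvar is set so that a short reply is accepted; the number of
 * bytes actually read ends up in out.args[0].size.
 */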
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
		    size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read.in;
	struct fuse_file *ff = file->private_data;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	inarg->flags = file->f_flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
	unsigned i;

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (write)
			set_page_dirty_lock(page);
		put_page(page);
	}
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	spin_unlock(&io->lock);

	if (!left) {
		long res;

		if (io->err)
			res = io->err;
		else if (io->bytes >= 0 && io->write)
			res = -EIO;
		else {
			res = io->bytes < 0 ? io->size : io->bytes;

			if (!is_sync_kiocb(io->iocb)) {
				struct path *path = &io->iocb->ki_filp->f_path;
				struct inode *inode = path->dentry->d_inode;
				struct fuse_conn *fc = get_fuse_conn(inode);
				struct fuse_inode *fi = get_fuse_inode(inode);

				spin_lock(&fc->lock);
				fi->attr_version = ++fc->attr_version;
				spin_unlock(&fc->lock);
			}
		}

		aio_complete(io->iocb, res, 0);
		kfree(io);
	}
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_io_priv *io = req->io;
	ssize_t pos = -1;

	fuse_release_user_pages(req, !io->write);

	if (io->write) {
		if (req->misc.write.in.size != req->misc.write.out.size)
			pos = req->misc.write.in.offset - io->offset +
				req->misc.write.out.size;
	} else {
		if (req->misc.read.in.size != req->out.args[0].size)
			pos = req->misc.read.in.offset - io->offset +
				req->out.args[0].size;
	}

	fuse_aio_complete(io, req->out.h.error, pos);
}

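/*
 * Account 'num_bytes' against the containing fuse_io_priv and submit the
 * request in the background; fuse_aio_complete_req() runs when the answer
 * arrives.
 */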
static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
		size_t num_bytes, struct fuse_io_priv *io)
{
	spin_lock(&io->lock);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	req->io = io;
	req->end = fuse_aio_complete_req;

	fuse_request_send_background(fc, req);

	return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;

	fuse_read_fill(req, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read.in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->out.args[0].size;
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size) {
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, size);
	}
	spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	size_t num_read;
	loff_t pos = page_offset(page);
	size_t count = PAGE_CACHE_SIZE;
	u64 attr_ver;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	/*
	 * Page writeback can extend beyond the lifetime of the
	 * page-cache page, so make sure we read a properly synced
	 * page.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc, 1);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	attr_ver = fuse_get_attr_version(fc);

	req->out.page_zeroing = 1;
	req->out.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_descs[0].length = count;
	num_read = fuse_send_read(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err) {
		/*
		 * Short read means EOF.  If file size is larger, truncate it
		 */
		if (num_read < count)
			fuse_read_update_size(inode, pos + num_read, attr_ver);

		SetPageUptodate(page);
	}

	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;
	size_t count = req->misc.read.in.size;
	size_t num_read = req->out.args[0].size;
	struct address_space *mapping = NULL;

	for (i = 0; mapping == NULL && i < req->num_pages; i++)
		mapping = req->pages[i]->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;

		/*
		 * Short read means EOF. If file size is larger, truncate it
		 */
		if (!req->out.h.error && num_read < count) {
			loff_t pos;

			pos = page_offset(req->pages[0]) + num_read;
			fuse_read_update_size(inode, pos,
					      req->misc.read.attr_ver);
		}
		fuse_invalidate_attr(inode); /* atime changed */
	}

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (req->ff)
		fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;

	req->out.argpages = 1;
	req->out.page_zeroing = 1;
	req->out.page_replace = 1;
	fuse_read_fill(req, file, pos, count, FUSE_READ);
	req->misc.read.attr_ver = fuse_get_attr_version(fc);
	if (fc->async_read) {
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		fuse_request_send_background(fc, req);
	} else {
		fuse_request_send(fc, req);
		fuse_readpages_end(fc, req);
		fuse_put_request(fc, req);
	}
}

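/* State shared between fuse_readpages() and its fill callback */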
struct fuse_fill_data {
	struct fuse_req *req;
	struct file *file;
	struct inode *inode;
	unsigned nr_pages;
};

static int fuse_readpages_fill(void *_data, struct page *page)
{
	struct fuse_fill_data *data = _data;
	struct fuse_req *req = data->req;
	struct inode *inode = data->inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_wait_on_page_writeback(inode, page->index);

	if (req->num_pages &&
	    (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
	     req->pages[req->num_pages - 1]->index + 1 != page->index)) {
		int nr_alloc = min_t(unsigned, data->nr_pages,
				     FUSE_MAX_PAGES_PER_REQ);
		fuse_send_readpages(req, data->file);
		if (fc->async_read)
			req = fuse_get_req_for_background(fc, nr_alloc);
		else
			req = fuse_get_req(fc, nr_alloc);

		data->req = req;
		if (IS_ERR(req)) {
			unlock_page(page);
			return PTR_ERR(req);
		}
	}

	if (WARN_ON(req->num_pages >= req->max_pages)) {
		fuse_put_request(fc, req);
		return -EIO;
	}

	page_cache_get(page);
	req->pages[req->num_pages] = page;
	req->page_descs[req->num_pages].length = PAGE_SIZE;
	req->num_pages++;
	data->nr_pages--;
	return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;
	int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.file = file;
	data.inode = inode;
	if (fc->async_read)
		data.req = fuse_get_req_for_background(fc, nr_alloc);
	else
		data.req = fuse_get_req(fc, nr_alloc);
	data.nr_pages = nr_pages;
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		if (data.req->num_pages)
			fuse_send_readpages(data.req, file);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);

	/*
	 * In auto invalidate mode, always update attributes on read.
	 * Otherwise, only update if we attempt to read past EOF (to ensure
	 * i_size is up to date).
	 */
	if (fc->auto_inval_data ||
	    (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
		int err;
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

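/*
 * Fill in a WRITE request.  Connections speaking a protocol older than
 * 7.9 expect the shorter compat layout of fuse_write_in, hence the
 * minor version check.
 */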
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    loff_t pos, size_t count)
{
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 2;
	if (ff->fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      loff_t pos, size_t count, fl_owner_t owner)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_write_in *inarg = &req->misc.write.in;

	fuse_write_fill(req, ff, pos, count);
	inarg->flags = file->f_flags;
	if (owner != NULL) {
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	fuse_request_send(fc, req);
	return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fc->lock);
	fi->attr_version = ++fc->attr_version;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	size_t res;
	unsigned offset;
	unsigned i;

	for (i = 0; i < req->num_pages; i++)
		fuse_wait_on_page_writeback(inode, req->pages[i]->index);

	res = fuse_send_write(req, file, pos, count, NULL);

	offset = req->page_descs[0].offset;
	count = res;
	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];

		if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
			SetPageUptodate(page);

		if (count > PAGE_CACHE_SIZE - offset)
			count -= PAGE_CACHE_SIZE - offset;
		else
			count = 0;
		offset = 0;

		unlock_page(page);
		page_cache_release(page);
	}

	return res;
}

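/*
 * Copy as much data from the iov_iter into freshly grabbed page-cache
 * pages as fits into one request.  Returns the number of bytes copied,
 * or an error if nothing could be copied.
 */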
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
			       struct address_space *mapping,
			       struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_descs[0].offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = tmp;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < req->max_pages && offset == 0);

	return count > 0 ? count : err;
}

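/*
 * Number of page-cache pages touched by a write of 'len' bytes at offset
 * 'pos', capped at FUSE_MAX_PAGES_PER_REQ.  E.g. a two byte write at
 * pos == PAGE_CACHE_SIZE - 1 straddles a page boundary and needs two pages.
 */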
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
	return min_t(unsigned,
		     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
		     (pos >> PAGE_CACHE_SHIFT) + 1,
		     FUSE_MAX_PAGES_PER_REQ);
}

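/*
 * Buffered write: copy user data into page-cache pages one requestful at
 * a time, send a synchronous WRITE for each batch, and stop early on a
 * short write or error.
 */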
static ssize_t fuse_perform_write(struct file *file,
				  struct address_space *mapping,
				  struct iov_iter *ii, loff_t pos)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err = 0;
	ssize_t res = 0;

	if (is_bad_inode(inode))
		return -EIO;

	do {
		struct fuse_req *req;
		ssize_t count;
		unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

		req = fuse_get_req(fc, nr_pages);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count <= 0) {
			err = count;
		} else {
			size_t num_written;

			num_written = fuse_send_write_pages(req, file, inode,
							    pos, count);
			err = req->out.h.error;
			if (!err) {
				res += num_written;
				pos += num_written;

				/* break out of the loop on short write */
				if (num_written != count)
					err = -EIO;
			}
		}
		fuse_put_request(fc, req);
	} while (!err && iov_iter_count(ii));

	if (res > 0)
		fuse_write_update_size(inode, pos);

	fuse_invalidate_attr(inode);

	return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	size_t ocount = 0;
	ssize_t written = 0;
	ssize_t written_buffered = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;
	loff_t endbyte = 0;

	WARN_ON(iocb->ki_pos != pos);

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	sb_start_write(inode->i_sb);
	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (file->f_flags & O_DIRECT) {
		written = generic_file_direct_write(iocb, iov, &nr_segs,
						    pos, &iocb->ki_pos,
						    count, ocount);
		if (written < 0 || written == count)
			goto out;

		pos += written;
		count -= written;

		iov_iter_init(&i, iov, nr_segs, count, written);
		written_buffered = fuse_perform_write(file, mapping, &i, pos);
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}
		endbyte = pos + written_buffered - 1;

		err = filemap_write_and_wait_range(file->f_mapping, pos,
						   endbyte);
		if (err)
			goto out;

		invalidate_mapping_pages(file->f_mapping,
					 pos >> PAGE_CACHE_SHIFT,
					 endbyte >> PAGE_CACHE_SHIFT);

		written += written_buffered;
		iocb->ki_pos = pos + written_buffered;
	} else {
		iov_iter_init(&i, iov, nr_segs, count, 0);
		written = fuse_perform_write(file, mapping, &i, pos);
		if (written >= 0)
			iocb->ki_pos = pos + written;
	}
out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);

	return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
		unsigned index, unsigned nr_pages)
{
	int i;

	for (i = index; i < index + nr_pages; i++)
		req->page_descs[i].length = PAGE_SIZE -
			req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
	return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
					size_t max_size)
{
	return min(iov_iter_single_seg_count(ii), max_size);
}

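/*
 * Pin the user buffer described by 'ii' into req->pages[] for direct I/O.
 * Kernel (KERNEL_DS) buffers are special-cased: they are passed by address
 * instead of being pinned page by page.
 */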
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
			       size_t *nbytesp, int write)
{
	size_t nbytes = 0;  /* # bytes already packed in req */

	/* Special case for kernel I/O: can copy directly into the buffer */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		unsigned long user_addr = fuse_get_user_addr(ii);
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

		if (write)
			req->in.args[1].value = (void *) user_addr;
		else
			req->out.args[0].value = (void *) user_addr;

		iov_iter_advance(ii, frag_size);
		*nbytesp = frag_size;
		return 0;
	}

	while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
		unsigned npages;
		unsigned long user_addr = fuse_get_user_addr(ii);
		unsigned offset = user_addr & ~PAGE_MASK;
		size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
		int ret;

		unsigned n = req->max_pages - req->num_pages;
		frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

		npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		npages = clamp(npages, 1U, n);

		ret = get_user_pages_fast(user_addr, npages, !write,
					  &req->pages[req->num_pages]);
		if (ret < 0)
			return ret;

		npages = ret;
		frag_size = min_t(size_t, frag_size,
				  (npages <<