/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

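/*
 * Allocate a request together with its page vector.  If the request
 * fits in FUSE_REQ_INLINE_PAGES pages, the arrays embedded in struct
 * fuse_req are reused; otherwise both arrays are kmalloc'ed.
 */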
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

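/*
 * Common helper for fuse_get_req() and fuse_get_req_for_background():
 * wait until the connection is initialized (and, for background
 * requests, not blocked), then allocate and set up a new request.
 */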
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

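/*
 * Drop a reference to the request.  On the final put the request is
 * either handed back to the reserved slot or freed.
 */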
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

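/* Allocate the next unique request id; caller must hold fiq->waitq.lock */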
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

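/* Queue a request on fiq->pending; caller must hold fiq->waitq.lock */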
static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

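/*
 * Move requests from the background queue to the input queue, keeping
 * at most max_background of them active at the same time.
 */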
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_iqueue *fiq = &fc->iq;
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del_init(&req->list);
	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	smp_wmb();
	set_bit(FR_FINISHED, &req->flags);
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

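/*
 * Wait for the request to be answered.  Unless the server disabled
 * interrupts, any signal may interrupt the first wait (queueing a
 * FUSE_INTERRUPT if the request was already read by userspace); then,
 * unless the request was forced, only fatal signals may interrupt;
 * the final wait is uninterruptible.
 */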
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		restore_sigs(&oldset);

		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}

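/*
 * A sketch of typical usage, adapted from a FUSE_GETATTR caller; the
 * inode, inarg and outarg variables belong to the caller:
 *
 *	FUSE_ARGS(args);
 *
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */
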
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);
	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

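/*
 * State for copying between the kernel request and the userspace
 * buffer, which is either an iov_iter or a sequence of pipe buffers.
 */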
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

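/*
 * A pipe page may only be stolen into the page cache if it looks like
 * an ordinary anonymous page: unmapped, not in any mapping, with a
 * single reference and no unexpected page flags.
 */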
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

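/*
 * Try to steal the pipe buffer's page and install it in the request in
 * place of *pagep, avoiding a copy (used when pages are moved from the
 * pipe on splice).  Returns 0 on success, a negative error, or 1 if
 * the caller should fall back to copying.
 */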
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

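/*
 * Hand a reference to the request page straight to the pipe buffer
 * instead of copying the data (zero-copy splice read).
 */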
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++)  {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)