// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef	CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

Alex Elder's avatar
Alex Elder committed
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
78
79
80
81
82
83
84

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

85
86
87
88
89
90
91
92
93
94
/*
 * connection states
 */
#define CON_STATE_CLOSED        1  /* -> PREOPEN */
#define CON_STATE_PREOPEN       2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING    3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING   4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN          5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY       6  /* -> PREOPEN, CLOSED */

Sage Weil's avatar
Sage Weil committed
95
96
97
98
99
100
101
102
103
/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX           0  /* we can close channel or drop
				       * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF           4  /* need to retry queuing delayed work */
104

105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
/*
 * Sanity-check a connection flag bit number.  The CON_FLAG_* values
 * are contiguous (LOSSYTX == 0 .. BACKOFF == 4), so any value up to
 * and including CON_FLAG_BACKOFF names a known flag.
 */
static bool con_flag_valid(unsigned long con_flag)
{
	return con_flag <= CON_FLAG_BACKOFF;
}

/* Atomically clear a (validated) connection flag bit. */
static void con_flag_clear(struct ceph_connection *con, unsigned long bit)
{
	BUG_ON(!con_flag_valid(bit));
	clear_bit(bit, &con->flags);
}

/* Atomically set a (validated) connection flag bit. */
static void con_flag_set(struct ceph_connection *con, unsigned long bit)
{
	BUG_ON(!con_flag_valid(bit));
	set_bit(bit, &con->flags);
}

/* Test a (validated) connection flag bit. */
static bool con_flag_test(struct ceph_connection *con, unsigned long bit)
{
	BUG_ON(!con_flag_valid(bit));
	return test_bit(bit, &con->flags);
}

/* Atomically test-and-clear a (validated) connection flag bit. */
static bool con_flag_test_and_clear(struct ceph_connection *con,
					unsigned long bit)
{
	BUG_ON(!con_flag_valid(bit));
	return test_and_clear_bit(bit, &con->flags);
}

/* Atomically test-and-set a (validated) connection flag bit. */
static bool con_flag_test_and_set(struct ceph_connection *con,
					unsigned long bit)
{
	BUG_ON(!con_flag_valid(bit));
	return test_and_set_bit(bit, &con->flags);
}

156
157
158
159
/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;

Sage Weil's avatar
Sage Weil committed
160
161
162
163
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
164
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
Sage Weil's avatar
Sage Weil committed
165

166
167
168
169
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

Sage Weil's avatar
Sage Weil committed
170
static void queue_con(struct ceph_connection *con);
171
static void cancel_con(struct ceph_connection *con);
172
static void ceph_con_workfn(struct work_struct *);
173
static void con_fault(struct ceph_connection *con);
Sage Weil's avatar
Sage Weil committed
174
175

/*
176
177
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
Sage Weil's avatar
Sage Weil committed
178
 */
179
180
181
182
183
184
185
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);
Sage Weil's avatar
Sage Weil committed
186

187
188
static struct page *zero_page;		/* used in certain error cases */

189
/*
 * Render @ss as an "addr:port" string in one slot of a small
 * round-robin array of static buffers (approximate reentrancy: the
 * returned pointer is reused after ADDR_STR_COUNT subsequent calls).
 */
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	/* claim the next buffer slot; the mask makes the index wrap */
	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
Sage Weil's avatar
Sage Weil committed
218

219
220
221
222
223
224
/* Keep a wire-encoded copy of our own address for use on the wire. */
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

Sage Weil's avatar
Sage Weil committed
225
226
227
/*
 * work queue for all reading and writing to/from the socket.
 */
228
static struct workqueue_struct *ceph_msgr_wq;
Sage Weil's avatar
Sage Weil committed
229

230
231
232
static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
Geliang Tang's avatar
Geliang Tang committed
233
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
234
235
236
	if (!ceph_msg_cache)
		return -ENOMEM;

237
	return 0;
238
239
240
241
242
243
244
245
246
}

/* Destroy the cache created by ceph_msgr_slab_init(). */
static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;	/* allow a later re-init */
}

247
/*
 * Common teardown: destroy the workqueue (if it was created), drop
 * our reference on the shared zero page, and free the slab cache.
 * Called both from ceph_msgr_exit() and from a failed ceph_msgr_init().
 */
static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	put_page(zero_page);	/* balances get_page() in ceph_msgr_init() */
	zero_page = NULL;

	ceph_msgr_slab_exit();
}

261
/*
 * Module init: set up the slab cache, grab a reference on the shared
 * zero page, and create the messenger workqueue.  Returns 0 on
 * success or -ENOMEM, undoing any partial setup on failure.
 */
int __init ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (!ceph_msgr_wq) {
		pr_err("msgr_init failed to create workqueue\n");
		_ceph_msgr_exit();	/* drops zero page + slab cache */
		return -ENOMEM;
	}

	return 0;
}

/* Module exit: init must have succeeded if we got this far. */
void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}

Yehuda Sadeh's avatar
Yehuda Sadeh committed
291
/* Wait until all currently queued messenger work has completed. */
void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
296

297
298
299
300
301
302
303
304
305
/* Connection socket state transition functions */

/* NEW -> CLOSED; warn on any other starting state. */
static void con_sock_state_init(struct ceph_connection *con)
{
	int prev;

	prev = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(prev != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, prev);
	dout("%s con %p sock %d -> %d\n", __func__, con, prev,
	     CON_SOCK_STATE_CLOSED);
}

/* CLOSED -> CONNECTING; warn on any other starting state. */
static void con_sock_state_connecting(struct ceph_connection *con)
{
	int prev;

	prev = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(prev != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, prev);
	dout("%s con %p sock %d -> %d\n", __func__, con, prev,
	     CON_SOCK_STATE_CONNECTING);
}

/* CONNECTING -> CONNECTED; warn on any other starting state. */
static void con_sock_state_connected(struct ceph_connection *con)
{
	int prev;

	prev = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(prev != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, prev);
	dout("%s con %p sock %d -> %d\n", __func__, con, prev,
	     CON_SOCK_STATE_CONNECTED);
}

/*
 * {CONNECTING,CONNECTED,CLOSING} -> CLOSING; warn otherwise.
 * (CLOSING -> CLOSING is tolerated: close events can repeat.)
 */
static void con_sock_state_closing(struct ceph_connection *con)
{
	int prev;

	prev = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(prev != CON_SOCK_STATE_CONNECTING &&
			prev != CON_SOCK_STATE_CONNECTED &&
			prev != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, prev);
	dout("%s con %p sock %d -> %d\n", __func__, con, prev,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
351
		    old_state != CON_SOCK_STATE_CLOSING &&
352
353
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
354
		printk("%s: unexpected old state %d\n", __func__, old_state);
355
356
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
357
}
358

Sage Weil's avatar
Sage Weil committed
359
360
361
362
363
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
364
static void ceph_sock_data_ready(struct sock *sk)
Sage Weil's avatar
Sage Weil committed
365
{
366
	struct ceph_connection *con = sk->sk_user_data;
367
368
369
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}
370

Sage Weil's avatar
Sage Weil committed
371
	if (sk->sk_state != TCP_CLOSE_WAIT) {
Alex Elder's avatar
Alex Elder committed
372
		dout("%s on %p state = %lu, queueing work\n", __func__,
Sage Weil's avatar
Sage Weil committed
373
374
375
376
377
378
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
Alex Elder's avatar
Alex Elder committed
379
static void ceph_sock_write_space(struct sock *sk)
Sage Weil's avatar
Sage Weil committed
380
{
Alex Elder's avatar
Alex Elder committed
381
	struct ceph_connection *con = sk->sk_user_data;
Sage Weil's avatar
Sage Weil committed
382

383
384
	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
Alex Elder's avatar
Alex Elder committed
385
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
386
387
388
389
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
390
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
391
		if (sk_stream_is_writeable(sk)) {
Alex Elder's avatar
Alex Elder committed
392
			dout("%s %p queueing write work\n", __func__, con);
393
394
395
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
Sage Weil's avatar
Sage Weil committed
396
	} else {
Alex Elder's avatar
Alex Elder committed
397
		dout("%s %p nothing to write\n", __func__, con);
Sage Weil's avatar
Sage Weil committed
398
399
400
401
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		/* record the close and defer real cleanup to the worker,
		 * which runs under the connection mutex */
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;	/* lets each callback recover its con */
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
450
static int ceph_tcp_connect(struct ceph_connection *con)
Sage Weil's avatar
Sage Weil committed
451
{
Sage Weil's avatar
Sage Weil committed
452
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
Sage Weil's avatar
Sage Weil committed
453
	struct socket *sock;
454
	unsigned int noio_flag;
Sage Weil's avatar
Sage Weil committed
455
456
457
	int ret;

	BUG_ON(con->sock);
458
459
460

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
461
	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
462
			       SOCK_STREAM, IPPROTO_TCP, &sock);
463
	memalloc_noio_restore(noio_flag);
Sage Weil's avatar
Sage Weil committed
464
	if (ret)
465
		return ret;
466
	sock->sk->sk_allocation = GFP_NOFS;
Sage Weil's avatar
Sage Weil committed
467

468
469
470
471
#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

Sage Weil's avatar
Sage Weil committed
472
473
	set_sock_callbacks(sock, con);

474
	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
Sage Weil's avatar
Sage Weil committed
475

476
	con_sock_state_connecting(con);
Sage Weil's avatar
Sage Weil committed
477
478
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
Sage Weil's avatar
Sage Weil committed
479
480
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
481
		     ceph_pr_addr(&con->peer_addr.in_addr),
Sage Weil's avatar
Sage Weil committed
482
		     sock->sk->sk_state);
483
	} else if (ret < 0) {
Sage Weil's avatar
Sage Weil committed
484
		pr_err("connect %s error %d\n",
485
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
Sage Weil's avatar
Sage Weil committed
486
		sock_release(sock);
487
		return ret;
488
	}
489

490
	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
491
492
493
494
495
496
497
498
499
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
			       ret);
	}

500
	con->sock = sock;
501
	return 0;
Sage Weil's avatar
Sage Weil committed
502
503
}

504
505
506
/*
 * If @buf is NULL, discard up to @len bytes.
 */
Sage Weil's avatar
Sage Weil committed
507
508
509
510
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
511
	int r;
Sage Weil's avatar
Sage Weil committed
512

513
514
515
	if (!buf)
		msg.msg_flags |= MSG_TRUNC;

516
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
Al Viro's avatar
Al Viro committed
517
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
518
519
520
	if (r == -EAGAIN)
		r = 0;
	return r;
Sage Weil's avatar
Sage Weil committed
521
522
}

523
524
525
/* Receive up to @length bytes directly into @page at @page_offset. */
static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
		     int page_offset, size_t length)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= page_offset,
		.bv_len		= length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int ret;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
	ret = sock_recvmsg(sock, &msg, msg.msg_flags);
	return ret == -EAGAIN ? 0 : ret;	/* EAGAIN -> "read 0 bytes" */
}

Sage Weil's avatar
Sage Weil committed
542
543
544
545
546
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int ret;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	ret = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	return ret == -EAGAIN ? 0 : ret;	/* EAGAIN -> "wrote 0 bytes" */
}

563
564
565
/*
 * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
 */
566
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
567
			     int offset, size_t size, int more)
568
{
569
570
	ssize_t (*sendpage)(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);
571
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
572
573
	int ret;

574
575
576
577
578
579
580
581
582
	/*
	 * sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case.
	 *
	 * Same goes for slab pages: skb_can_coalesce() allows
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (page_count(page) >= 1 && !PageSlab(page))
583
		sendpage = sock->ops->sendpage;
584
	else
585
		sendpage = sock_no_sendpage;
586

587
	ret = sendpage(sock, page, offset, size, flags);
588
589
	if (ret == -EAGAIN)
		ret = 0;
590
591
592

	return ret;
}
Sage Weil's avatar
Sage Weil committed
593
594
595
596
597
598

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	/* returns the result of the shutdown, 0 if no socket existed */
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	/* unlink from its connection list and drop the list's reference */
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

/*
 * Drop every queued and in-flight message on @con and zero its
 * sequence-number state.  Caller is expected to hold con->mutex
 * (all callers in this file do) — TODO confirm against full file.
 */
static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	/* drop any partially received incoming message */
	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	/* drop the message currently being written out, if any */
	if (con->out_msg) {
		BUG_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;

	con->out_skip = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);		/* discard queued messages and seqs */
	con->peer_global_seq = 0;
	cancel_con(con);		/* cancel pending/delayed work */
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);
Sage Weil's avatar
Sage Weil committed
688
689
690
691

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);		/* kick off the connect from the workqueue */
}
EXPORT_SYMBOL(ceph_con_open);
Sage Weil's avatar
Sage Weil committed
711

712
713
714
715
716
717
718
719
/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	/* connect_seq is bumped on each successful connect */
	return con->connect_seq != 0;
}

Sage Weil's avatar
Sage Weil committed
720
721
722
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));	/* start from a clean slate */
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);	/* sock_state: NEW -> CLOSED */

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
Sage Weil's avatar
Sage Weil committed
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 seq;

	spin_lock(&msgr->global_seq_lock);
	msgr->global_seq = max(msgr->global_seq, gt);
	seq = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return seq;
}

761
/* Empty the outgoing kvec array.  Must not be called with a skip pending. */
static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

770
static void con_out_kvec_add(struct ceph_connection *con,
771
772
				size_t size, void *data)
{
Ilya Dryomov's avatar
Ilya Dryomov committed
773
	int index = con->out_kvec_left;
774

Ilya Dryomov's avatar
Ilya Dryomov committed
775
	BUG_ON(con->out_skip);
776
777
778
779
780
781
782
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}
Sage Weil's avatar
Sage Weil committed
783

Ilya Dryomov's avatar
Ilya Dryomov committed
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		/* last populated entry is at off + out_kvec_left - 1 */
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}

805
#ifdef CONFIG_BLOCK
806
807
808
809
810
811

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_bio_iter *it = &cursor->bio_iter;

	/* consume at most @length bytes of the bio chain */
	cursor->resid = min_t(size_t, length, data->bio_length);
	*it = data->bio_pos;
	if (cursor->resid < it->iter.bi_size)
		it->iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	/* last_piece: the first bvec already covers everything we need */
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
}

827
/* Return page/offset/length of the current piece without consuming it. */
static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
						size_t *page_offset,
						size_t *length)
{
	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
					   cursor->bio_iter.iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

839
840
/*
 * Consume @bytes from the cursor.  Returns true when the cursor has
 * moved on to a new piece (possibly in the next bio of the chain),
 * false if there is no more data or the current segment still has
 * bytes left to process.
 */
static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct ceph_bio_iter *it = &cursor->bio_iter;

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
	cursor->resid -= bytes;
	bio_advance_iter(it->bio, &it->iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
		return false;	/* more bytes to process in this segment */

	/* current bio exhausted: step to the next bio in the chain */
	if (!it->iter.bi_size) {
		it->bio = it->bio->bi_next;
		it->iter = it->bio->bi_iter;
		if (cursor->resid < it->iter.bi_size)
			it->iter.bi_size = cursor->resid;
	}

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
	return true;
}
869
#endif /* CONFIG_BLOCK */
870

871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
/* Initialize the cursor over a bvec-array data item, capped at @length. */
static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio_vec *bvecs = data->bvec_pos.bvecs;

	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
	cursor->bvec_iter = data->bvec_pos.iter;
	cursor->bvec_iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	/* last_piece: the first bvec already covers everything we need */
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
}

/* Return page/offset/length of the current piece without consuming it. */
static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
						size_t *page_offset,
						size_t *length)
{
	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
					   cursor->bvec_iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

/*
 * Consume @bytes of the current bio_vec piece.  Returns true when the
 * cursor stepped onto a new piece, false when the current piece still
 * has bytes left or the data item is fully consumed.
 */
static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio_vec *bv_array = cursor->data->bvec_pos.bvecs;

	/* A single advance never exceeds the remaining data or piece. */
	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bvec_iter_len(bv_array, cursor->bvec_iter));
	cursor->resid -= bytes;
	bvec_iter_advance(bv_array, &cursor->bvec_iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || cursor->bvec_iter.bi_bvec_done)
		return false;	/* more bytes to process in this segment */

	/* Stepped onto a fresh bvec: recompute the last-piece flag. */
	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bvec_iter_len(bv_array, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bv_array, cursor->bvec_iter);
	return true;
}

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
928
					size_t length)
929
{
930
	struct ceph_msg_data *data = cursor->data;
931
932
933
934
935
936
937
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

938
	cursor->resid = min(length, data->length);
939
940
941
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
942
943
944
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
945
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
946
947
}

/*
 * Return the page holding the next piece of a page-array data item,
 * reporting its offset and length through @page_offset and @length.
 * The last piece may be short; every other piece runs to page end.
 *
 * Original text was corrupted by interleaved line-number residue;
 * restored to the upstream implementation.
 */
static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
					size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
969
970
						size_t bytes)
{
971
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
972
973
974
975
976
977

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
978
979
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
980
981
		return false;	/* more bytes to process in the current page */

982
983
984
	if (!cursor->resid)
		return false;   /* no more data */

985
	/* Move on to the next page; offset is already at 0 */
986
987
988

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
989
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
990
991
992
993

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
1000
					size_t length)
1001
{
1002
	struct ceph_msg_data *data = cursor->data;
1003
1004
1005
	struct ceph_pagelist *pagelist;
	struct page *page;

1006
	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1007
1008
1009

	pagelist = data->pagelist;
	BUG_ON(!pagelist);
1010
1011

	if (!length)
1012
1013
1014
1015
1016
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

1017
	cursor->resid = min(length, pagelist->length);
1018
1019
	cursor->page = page;
	cursor->offset = 0;
Alex Elder's avatar
Alex Elder committed
1020
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
1021
1022
}

/*
 * Return the page holding the next piece of a pagelist data item,
 * reporting its offset and length through @page_offset and @length.
 *
 * Original text was corrupted by interleaved line-number residue;
 * restored to the upstream implementation.
 */
static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
				size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	/* consumed + remaining must always add up to the full item */
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
1049
						size_t bytes)
1050
{
1051
	struct ceph_msg_data *data = cursor->data;
1052
1053
1054
1055
1056
1057
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);
1058
1059

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1060
1061
1062
1063
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

1064
	cursor->resid -= bytes;
1065
	cursor->offset += bytes;
1066
	/* offset of first page in pagelist is always 0 */
1067
1068
1069
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

1070
1071
1072
	if (!cursor->resid)
		return false;   /* no more data */

1073
1074
1075
	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1076
	cursor->page = list_next_entry(cursor->page, lru);
1077
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
1078
1079
1080
1081

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1091
{
1092
	size_t length = cursor->total_resid;
1093
1094

	switch (cursor->data->type) {
1095
	case CEPH_MSG_DATA_PAGELIST:
1096
		ceph_msg_data_pagelist_cursor_init(cursor, length);
1097
		break;
1098
	case CEPH_MSG_DATA_PAGES:
1099
		ceph_msg_data_pages_cursor_init(cursor, length);
1100
		break;
1101
1102
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
1103
		ceph_msg_data_bio_cursor_init(cursor, length);
1104
		break;
1105
#endif /* CONFIG_BLOCK */
1106
1107
1108
	case CEPH_MSG_DATA_BVECS:
		ceph_msg_data_bvecs_cursor_init(cursor, length);
		break;
1109
	case CEPH_MSG_DATA_NONE: