/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014  Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

22
23
24
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

49
50
51
52
53
54
/* Default netif_msg categories enabled when the user does not override them */
#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

/* Sentinel meaning "this register does not exist on this variant" */
#define SH_ETH_OFFSET_INVALID	((u16)~0)

/* Fill a register-offset table with the invalid sentinel by default */
#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

60
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
61
62
	SH_ETH_OFFSET_DEFAULTS,

63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

157
static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
158
159
	SH_ETH_OFFSET_DEFAULTS,

160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[RFLR]		= 0x0508,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[MAFCR]		= 0x0778,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008C,
};

215
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
216
217
	SH_ETH_OFFSET_DEFAULTS,

218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
258
	[RMIIMODE]      = 0x026c,
259
260
261
262
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

263
static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
264
265
	SH_ETH_OFFSET_DEFAULTS,

266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
318
319
	SH_ETH_OFFSET_DEFAULTS,

320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};

406
407
408
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
/* Write @data to the register identified by @enum_index, translating it
 * through the per-variant offset table; silently drops (with a WARN) writes
 * to registers the variant does not implement.
 */
static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg = mdp->reg_offset[enum_index];

	if (!WARN_ON(reg == SH_ETH_OFFSET_INVALID))
		iowrite32(data, mdp->addr + reg);
}

/* Read the register identified by @enum_index via the per-variant offset
 * table; returns all-ones (with a WARN) if the register does not exist.
 */
static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 reg = mdp->reg_offset[enum_index];

	if (WARN_ON(reg == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + reg);
}

431
432
433
434
435
436
437
/* Read-modify-write helper: clear the @clear bits, then set the @set bits. */
static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	u32 val = sh_eth_read(ndev, enum_index);

	val &= ~clear;
	val |= set;
	sh_eth_write(ndev, val, enum_index);
}

438
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
439
{
440
	return mdp->reg_offset == sh_eth_offset_gigabit;
441
442
}

443
444
445
446
447
/* True when this device uses the RZ/A1 fast-Ethernet register layout */
static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}

448
static void sh_eth_select_mii(struct net_device *ndev)
449
450
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
451
	u32 value;
452
453
454
455
456
457
458
459
460
461
462
463

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
464
465
		netdev_warn(ndev,
			    "PHY interface mode was not setup. Set to MII.\n");
466
467
468
469
470
471
472
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

473
static void sh_eth_set_duplex(struct net_device *ndev)
474
475
476
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

477
	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
478
479
}

480
481
482
483
484
static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
485
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
486
487
488
	mdelay(1);
}

489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
#ifdef CONFIG_OF
/* R7S72100 */
/* Per-SoC capability flags and register defaults consumed by the common
 * code paths (interrupt masks, FIFO sizing, padding, TSU presence, ...).
 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xff7f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.hw_crc		= 1,
	.tsu		= 1,
	.shift_rd0	= 1,
};
537
538
539
540
541
542

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
543
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
/* SoC-specific configuration: gigabit register layout with TSU, requires
 * the MII-mode reselect after chip reset (see chip_reset callback).
 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.shift_rd0	= 1,
};
580

581
/* There is CPU dependent code */
582
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
583
584
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
585

586
587
	switch (mdp->speed) {
	case 10: /* 10BASE */
588
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
589
590
		break;
	case 100:/* 100BASE */
591
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
592
593
594
595
		break;
	}
}

Sergei Shtylyov's avatar
Sergei Shtylyov committed
596
/* R8A7778/9 */
597
static struct sh_eth_cpu_data r8a777x_data = {
598
	.set_duplex	= sh_eth_set_duplex,
599
	.set_rate	= sh_eth_set_rate_r8a777x,
600

601
602
	.register_type	= SH_ETH_REG_FAST_RCAR,

603
604
605
606
607
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
608
609
610
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
611
	.fdr_value	= 0x00000f0f,
612
613
614
615
616
617
618

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

Sergei Shtylyov's avatar
Sergei Shtylyov committed
619
620
/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
621
622
623
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

624
625
	.register_type	= SH_ETH_REG_FAST_RCAR,

626
627
628
629
630
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
631
632
633
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
634
	.fdr_value	= 0x00000f0f,
635

636
637
	.trscer_err_mask = DESC_I_RINT8,

638
639
640
641
642
643
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rmiimode	= 1,
};
644
#endif /* CONFIG_OF */
645

646
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
647
648
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
649
650
651

	switch (mdp->speed) {
	case 10: /* 10BASE */
652
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
653
654
		break;
	case 100:/* 100BASE */
655
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
656
657
658
659
660
		break;
	}
}

/* SH7724 */
661
static struct sh_eth_cpu_data sh7724_data = {
662
	.set_duplex	= sh_eth_set_duplex,
663
	.set_rate	= sh_eth_set_rate_sh7724,
664

665
666
	.register_type	= SH_ETH_REG_FAST_SH4,

667
668
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
669
	.eesipr_value	= 0x01ff009f,
670
671

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
672
673
674
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
675
676
677
678
679

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
680
681
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
682
};
683

684
static void sh_eth_set_rate_sh7757(struct net_device *ndev)
685
686
687
688
689
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
690
		sh_eth_write(ndev, 0, RTRATE);
691
692
		break;
	case 100:/* 100BASE */
693
		sh_eth_write(ndev, 1, RTRATE);
694
695
696
697
698
		break;
	}
}

/* SH7757 */
699
700
701
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,
702

703
704
	.register_type	= SH_ETH_REG_FAST_SH4,

705
706
707
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
708
709
710
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,
711

712
	.irq_flags	= IRQF_SHARED,
713
714
715
716
717
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
718
719
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
720
	.rtrate		= 1,
721
};
722

723
#define SH_GIGA_ETH_BASE	0xfee00000UL
724
725
726
727
728
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
729
	u32 mahr[2], malr[2];
730
731
732

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
Yoshihiro Shimoda's avatar
Yoshihiro Shimoda committed
733
734
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
735
736
737
	}

	/* reset device */
738
	iowrite32(ARSTR_ARST, (void *)(SH_GIGA_ETH_BASE + 0x1800));
739
740
741
742
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
Yoshihiro Shimoda's avatar
Yoshihiro Shimoda committed
743
744
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
766
static struct sh_eth_cpu_data sh7757_data_giga = {
767
	.chip_reset	= sh_eth_chip_reset_giga,
768
	.set_duplex	= sh_eth_set_duplex,
769
770
	.set_rate	= sh_eth_set_rate_giga,

771
772
	.register_type	= SH_ETH_REG_GIGABIT,

773
774
775
776
777
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
778
779
780
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
781
782
	.fdr_value	= 0x0000072f,

783
	.irq_flags	= IRQF_SHARED,
784
785
786
787
788
789
790
791
792
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value   = 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
793
	.tsu		= 1,
794
795
};

796
797
/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
798
799
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
800
801
	.set_rate	= sh_eth_set_rate_gether,

802
803
	.register_type	= SH_ETH_REG_GIGABIT,

804
805
806
807
808
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
809
810
811
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,
830

831
832
	.register_type	= SH_ETH_REG_GIGABIT,

833
834
835
836
837
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
Sergei Shtylyov's avatar
Sergei Shtylyov committed
838
839
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
840
841
842
843
844
845
846
847
848
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
849
	.tsu		= 1,
850
	.irq_flags	= IRQF_SHARED,
851
852
};

853
static struct sh_eth_cpu_data sh7619_data = {
854
855
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

856
857
858
859
860
861
862
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
863
864

static struct sh_eth_cpu_data sh771x_data = {
865
866
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

867
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
868
	.tsu		= 1,
869
870
871
872
873
874
875
876
877
878
879
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
Sergei Shtylyov's avatar
Sergei Shtylyov committed
880
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
881
882
883
884
885
886
887
888
889
890
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
891
892
893

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
894
895
}

896
897
898
899
900
901
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
902
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
903
904
905
906
			break;
		mdelay(1);
		cnt--;
	}
907
	if (cnt <= 0) {
908
		netdev_err(ndev, "Device reset failed\n");
909
910
911
		ret = -ETIMEDOUT;
	}
	return ret;
912
}
913
914
915
916
917
918

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

919
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
920
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
921
		sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
922
923
924

		ret = sh_eth_check_reset(ndev);
		if (ret)
925
			return ret;
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
945
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
946
		mdelay(3);
947
		sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
948
949
950
951
	}

	return ret;
}
952
953
954

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
955
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
956
957

	if (reserve)
958
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
959
960
}

Sergei Shtylyov's avatar
Sergei Shtylyov committed
961
/* Program the hardware MAC address from dev->dev_addr. */
962
963
static void update_mac_address(struct net_device *ndev)
{
964
	sh_eth_write(ndev,
Sergei Shtylyov's avatar
Sergei Shtylyov committed
965
966
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
967
	sh_eth_write(ndev,
Sergei Shtylyov's avatar
Sergei Shtylyov committed
968
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
969
970
}

Sergei Shtylyov's avatar
Sergei Shtylyov committed
971
/* Get MAC address from SuperH MAC address register
972
973
974
975
976
977
 *
 * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
 * When you want use this device, you must set MAC address in bootloader.
 *
 */
978
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
979
{
980
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
981
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
982
	} else {
983
984
985
986
987
988
989
990
991
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
992
	}
993
994
}

995
static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
996
{
997
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
998
999
1000
1001
1002
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

1003
struct bb_info {
Yoshihiro Shimoda's avatar
Yoshihiro Shimoda committed
1004
	void (*set_gate)(void *addr);
1005
	struct mdiobb_ctrl ctrl;
Yoshihiro Shimoda's avatar
Yoshihiro Shimoda committed
1006
	void *addr;
1007
1008
};

1009
/* Set or clear @mask in the PIR register (read-modify-write), invoking the
 * optional bus gate first.
 */
static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data*/
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
1034
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
1035
1036
1037
1038
1039
1040
}

/* Get bit data*/
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1041
1042
1043
1044

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

1045
	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
1046
1047
1048
1049
1050
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
1051
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
}

/* mdio bus control struct: bit-banging callbacks handed to the generic
 * mdio-bitbang helper
 */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
1067
	int ringsize, i;
1068
1069
1070

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
1071
1072
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
1073
1074
	}
	kfree(mdp->rx_skbuff);
1075
	mdp->rx_skbuff = NULL;
1076
1077
1078

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
1079
1080
		for (i = 0; i < mdp->num_tx_ring; i++)
			dev_kfree_skb(mdp->tx_skbuff[i]);
1081
1082
	}
	kfree(mdp->tx_skbuff);
1083
	mdp->tx_skbuff = NULL;
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
1108
1109
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1110
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1111
	dma_addr_t dma_addr;
1112
	u32 buf_len;
1113

Sergei Shtylyov's avatar
Sergei Shtylyov committed
1114
1115
1116
1117
	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;
1118
1119
1120
1121

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
1122
	for (i = 0; i < mdp->num_rx_ring; i++) {
1123
1124
		/* skb */
		mdp->rx_skbuff[i] = NULL;
1125
		skb = netdev_alloc_skb(ndev, skbuff_size);
1126
1127
		if (skb == NULL)
			break;
1128
1129
		sh_eth_set_receive_align(skb);

1130
		/* The size of the buffer is a multiple of 32 bytes. */
1131
1132
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
1133
1134
1135
1136
1137
1138
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;
1139
1140
1141
1142

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
1143
1144
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
1145

1146
1147
		/* Rx descriptor address set */
		if (i == 0) {
1148
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1149
1150
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
1151
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1152
		}
1153
1154
	}

1155
	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1156
1157

	/* Mark the last entry as wrapping the ring. */
1158
1159
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);
1160
1161
1162
1163

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
1164
	for (i = 0; i < mdp->num_tx_ring; i++) {
1165
1166
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
1167
1168
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
1169
		if (i == 0) {
1170
			/* Tx descriptor address set */
1171
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1172
1173
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
1174
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1175
		}
1176
1177
	}

1178
	txdesc->status |= cpu_to_le32(TD_TDLE);
1179
1180
1181
1182
1183
1184
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
1185
	int rx_ringsize, tx_ringsize;
1186

Sergei Shtylyov's avatar
Sergei Shtylyov committed
1187
	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1188
1189
1190
1191
1192
1193
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1194
1195
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
1196
1197

	/* Allocate RX and TX skb rings */
1198
1199
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
1200
1201
	if (!mdp->rx_skbuff)
		return -ENOMEM;
1202

1203
1204
	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
1205
	if (!mdp->tx_skbuff)
1206
		goto ring_free;
1207
1208

	/* Allocate all Rx descriptors. */
1209
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1210
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1211
					  GFP_KERNEL);
1212
	if (!mdp->rx_ring)
1213
		goto ring_free;
1214
1215
1216
1217

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
1218
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1219
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1220
					  GFP_KERNEL);
1221
	if (!mdp->tx_ring)
1222
		goto ring_free;
1223
	return 0;
1224

1225
1226
ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
1227
1228
	sh_eth_ring_free(ndev);

1229
	return -ENOMEM;
1230
1231
}

1232
static int sh_eth_dev_init(struct net_device *ndev)
1233
1234
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
1235
	int ret;
1236
1237

	/* Soft Reset */
1238
1239
	ret = sh_eth_reset(ndev);
	if (ret)
1240
		return ret;
1241

1242
1243
1244
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

1245
1246
	/* Descriptor format */
	sh_eth_ring_format(ndev);
1247
	if (mdp->cd->rpadir)
1248
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1249
1250

	/* all sh_eth int mask */
1251
	sh_eth_write(ndev, 0, EESIPR);
1252

1253
#if defined(__LITTLE_ENDIAN)
1254
	if (mdp->cd->hw_swap)
1255
		sh_eth_write(ndev, EDMR_EL, EDMR);
1256
	else
1257
#endif
1258
		sh_eth_write(ndev, 0, EDMR);
1259

1260
	/* FIFO size set */
1261
1262
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);
1263

1264
1265
	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);
1266

1267
	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1268

1269
	if (mdp->cd->bculr)
1270
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst sycle set */
1271

1272
	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1273

1274
	if (!mdp->cd->no_trimd)
1275
		sh_eth_write(ndev, 0, TRIMD);