/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>


/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10


struct axon_msic {
	struct irq_host *irq_host;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}

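/*
 * Chained handler for the MSIC's cascade interrupt.  The MSIC DMAs one
 * 16-byte entry per MSI into the FIFO; drain everything between our cached
 * read offset and the hardware's write offset and dispatch the virq held
 * in the low 16 bits of each entry.
 */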
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	struct axon_msic *msic = get_irq_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

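/*
 * Walk up the device tree from a PCI device looking for an "msi-translator"
 * phandle, and return the axon_msic behind the node it points to (or NULL
 * if the device is not served by an MSIC).
 */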
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_host *irq_host;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_host = irq_find_host(dn);
	if (!irq_host) {
		dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_host->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

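/* MSIs are only usable if some parent node points at an MSIC. */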
static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	if (!find_msi_translator(dev))
		return -ENODEV;

	return 0;
}

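/*
 * Fill in the address part of the MSI message from the first
 * "msi-address-64" (64-bit capable devices only) or "msi-address-32"
 * property found while walking up the device tree from the PCI device.
 */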
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

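/*
 * Allocate a virq for each MSI on the device and stash it in the MSI
 * message data, so the value the device writes into the FIFO identifies
 * the Linux interrupt directly.
 */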
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	/* We rely on being able to stash a virq in a u16 */
	BUILD_BUG_ON(NR_IRQS > 65536);

	list_for_each_entry(entry, &dev->msi_list, list) {
		virq = irq_create_direct_mapping(msic->irq_host);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		set_irq_msi(virq, entry);
		msg.data = virq;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

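/*
 * Undo axon_msi_setup_msi_irqs(): detach each MSI descriptor and dispose
 * of its virq mapping.
 */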
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		set_irq_msi(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
	.name		= "AXON-MSI",
};

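/* Every virq on this host gets the MSI chip and the simple flow handler. */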
static int msic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static struct irq_host_ops msic_host_ops = {
	.map	= msic_host_map,
};

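/* Disable the MSIC hardware when the platform device is shut down. */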
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		  msic->irq_host->of_node->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

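/*
 * Probe: map the MSIC's DCRs, allocate the DMA'd FIFO, create a NOMAP
 * irq_host, hook up the cascade handler, enable the hardware and install
 * the ppc_md MSI callbacks.
 */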
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
			dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
					NR_IRQS, &msic_host_ops, 0);
	if (!msic->irq_host) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	msic->irq_host->host_data = msic;

	set_irq_data(virq, msic);
	set_irq_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = axon_msi_check_device;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.owner = THIS_MODULE,
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

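/*
 * Map the MSIC register named by the "reg" property and expose it as a
 * debugfs file, so a value can be written straight to the hardware by hand
 * while debugging.
 */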
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif /* DEBUG */