/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
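 *
 * The PIL-level consumer of those per-cpu lists is handler_irq() further
 * down in this file: it grabs trap_block[cpu].irq_worklist with an atomic
 * xchg32() (so the snapshot and the clear happen in one step) and then walks
 * the chained buckets, handing each bucket's virt_irq to the generic layer.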
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
	/* Next handler in per-CPU IRQ worklist.  We know that
	 * bucket pointers have the high 32-bits clear, so to
	 * save space we only store the bits we need.
	 */
/*0x00*/unsigned int irq_chain;

	/* Virtual interrupt number assigned to this INO.  */
/*0x04*/unsigned int virt_irq;
};

#define NUM_IVECS	(IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
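/* These macros just convert between the two encodings of an INO bucket:
 * __irq() narrows an ino_bucket pointer into the 32-bit cookie that fits in
 * irq_chain (the high 32 bits of bucket addresses are known to be clear),
 * __bucket() widens such a cookie back into the pointer, and __irq_ino()
 * turns it into an index into ivector_table[].  Illustrative round trip:
 *
 *	struct ino_bucket *b = &ivector_table[ino];
 *	BUG_ON(__bucket(__irq(b)) != b);
 *	BUG_ON(__irq_ino(__irq(b)) != ino);
 */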

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)

static unsigned int virt_to_real_irq_table[NR_IRQS];

static unsigned char virt_irq_alloc(unsigned int real_irq)
{
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_to_real_irq_table[ent])
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		return 0;
	}

	virt_to_real_irq_table[ent] = real_irq;

	return ent;
}
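
/* Virtual IRQ number 0 is reserved as the "no mapping" value: virt_irq_alloc()
 * starts its scan at entry 1 and returns 0 when the table is exhausted, and
 * virt_irq_free() below releases a slot by storing 0 back into it.  A caller
 * would therefore treat a zero return as failure, along these lines
 * (hypothetical, not code from this file):
 *
 *	unsigned char virt_irq = virt_irq_alloc(real_irq);
 *	if (!virt_irq)
 *		return -ENOMEM;
 */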

#ifdef CONFIG_PCI_MSI
static void virt_irq_free(unsigned int virt_irq)
{
	unsigned int real_irq;

	if (virt_irq >= NR_IRQS)
		return;

	real_irq = virt_to_real_irq_table[virt_irq];
	virt_to_real_irq_table[virt_irq] = 0;

	__bucket(real_irq)->virt_irq = 0;
}
#endif

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
	return virt_to_real_irq_table[virt_irq];
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}
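/* Each line printed above looks roughly like the following (made-up counts,
 * shown only to illustrate the format strings used by show_interrupts()):
 *
 *            CPU0       CPU1
 *   5:    1234567         89      sun4u  serial
 *
 * i.e. the virtual IRQ number, one count per online cpu, the irq_chip
 * typename and then the action name(s).
 */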

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}
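
/* To summarize the encodings above: on Starfire the UPA target ID comes from
 * starfire_translate(); on JBUS parts (Jalapeno/Serrano) the cpuid is shifted
 * straight into the JBUS TID field; on other cheetah/cheetah_plus (Safari)
 * systems the cpuid is split into an agent/node pair (e.g. cpuid 0x23 gives
 * agent 0x03, node 0x01 before the shifts); everything else uses the plain
 * UPA TID field.
 */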

struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*pre_handler_arg1;
	void		*pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
	unsigned int real_irq = virt_to_real_irq(virt_irq);
	struct ino_bucket *bucket = NULL;

	if (likely(real_irq))
		bucket = __bucket(real_irq);

	return bucket;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
	}
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		u32 tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

static void sun4u_irq_end(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
	}
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
	}
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		int err;

		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): "
			       "err(%d)\n", ino, err);
	}
}

#ifdef CONFIG_PCI_MSI
static void sun4v_msi_enable(unsigned int virt_irq)
{
	sun4v_irq_enable(virt_irq);
	unmask_msi_irq(virt_irq);
}

static void sun4v_msi_disable(unsigned int virt_irq)
{
	mask_msi_irq(virt_irq);
	sun4v_irq_disable(virt_irq);
}
#endif

static void sun4v_irq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(bucket)) {
		int err;

		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_intr_setstate(%x): "
			       "err(%d)\n", ino, err);
	}
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid, dev_handle, dev_ino;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
			       "err(%d)\n",
			       dev_handle, dev_ino, cpuid, err);
		err = sun4v_vintr_set_state(dev_handle, dev_ino,
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
				"HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
					    HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_ENABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long cpuid, dev_handle, dev_ino;
		int err;

		cpuid = irq_choose_cpu(virt_irq);

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
			       "err(%d)\n",
			       dev_handle, dev_ino, cpuid, err);
	}
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
					    HV_INTR_DISABLED);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
			       "HV_INTR_DISABLED): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void sun4v_virq_end(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = bucket - &ivector_table[0];
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(bucket)) {
		unsigned long dev_handle, dev_ino;
		int err;

		dev_handle = ino & IMAP_IGN;
		dev_ino = ino & IMAP_INO;

		err = sun4v_vintr_set_state(dev_handle, dev_ino,
					    HV_INTR_STATE_IDLE);
		if (err != HV_EOK)
			printk("sun4v_vintr_set_state(%lx,%lx,"
				"HV_INTR_STATE_IDLE): err(%d)\n",
			       dev_handle, dev_ino, err);
	}
}

static void run_pre_handler(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data->pre_handler)) {
		data->pre_handler(__irq_ino(__irq(bucket)),
				  data->pre_handler_arg1,
				  data->pre_handler_arg2);
	}
}

static struct irq_chip sun4u_irq = {
	.typename	= "sun4u",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.end		= sun4u_irq_end,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4u_irq_ack = {
	.typename	= "sun4u+ack",
	.enable		= sun4u_irq_enable,
	.disable	= sun4u_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4u_irq_end,
	.set_affinity	= sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
	.typename	= "sun4v",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.end		= sun4v_irq_end,
	.set_affinity	= sun4v_set_affinity,
};

static struct irq_chip sun4v_irq_ack = {
	.typename	= "sun4v+ack",
	.enable		= sun4v_irq_enable,
	.disable	= sun4v_irq_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_irq_end,
	.set_affinity	= sun4v_set_affinity,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip sun4v_msi = {
	.typename	= "sun4v+msi",
	.mask		= mask_msi_irq,
	.unmask		= unmask_msi_irq,
	.enable		= sun4v_msi_enable,
	.disable	= sun4v_msi_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_irq_end,
	.set_affinity	= sun4v_set_affinity,
};
#endif

static struct irq_chip sun4v_virq = {
	.typename	= "vsun4v",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.end		= sun4v_virq_end,
	.set_affinity	= sun4v_virt_set_affinity,
};

static struct irq_chip sun4v_virq_ack = {
	.typename	= "vsun4v+ack",
	.enable		= sun4v_virq_enable,
	.disable	= sun4v_virq_disable,
	.ack		= run_pre_handler,
	.end		= sun4v_virq_end,
	.set_affinity	= sun4v_virt_set_affinity,
};

void irq_install_pre_handler(int virt_irq,
			     void (*func)(unsigned int, void *, void *),
			     void *arg1, void *arg2)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_chip *chip;

	data->pre_handler = func;
	data->pre_handler_arg1 = arg1;
	data->pre_handler_arg2 = arg2;

	chip = get_irq_chip(virt_irq);
	if (chip == &sun4u_irq_ack ||
	    chip == &sun4v_irq_ack ||
	    chip == &sun4v_virq_ack
#ifdef CONFIG_PCI_MSI
	    || chip == &sun4v_msi
#endif
	    )
		return;

	chip = (chip == &sun4u_irq ?
		&sun4u_irq_ack :
		(chip == &sun4v_irq ?
		 &sun4v_irq_ack : &sun4v_virq_ack));
	set_irq_chip(virt_irq, chip);
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	int ino;

	BUG_ON(tlb_type == hypervisor);

	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	bucket = &ivector_table[ino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
		set_irq_chip(bucket->virt_irq, &sun4u_irq);
	}

	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(bucket->virt_irq, data);

	data->imap  = imap;
	data->iclr  = iclr;

out:
	return bucket->virt_irq;
}
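
/* A minimal, hypothetical user of build_irq(): a sun4u bus driver that knows
 * the physical addresses of a device's ICLR and IMAP registers would obtain
 * a virtual IRQ and then register its handler in the usual way, e.g.
 *
 *	unsigned int virt_irq = build_irq(0, iclr, imap);
 *	if (request_irq(virt_irq, my_handler, IRQF_SHARED, "mydev", dev))
 *		...;
 *
 * where my_handler, "mydev" and dev stand in for the driver's own handler,
 * name and cookie.
 */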

static unsigned int sun4v_build_common(unsigned long sysino,
				       struct irq_chip *chip)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;

	BUG_ON(tlb_type != hypervisor);

	bucket = &ivector_table[sysino];
	if (!bucket->virt_irq) {
		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
		set_irq_chip(bucket->virt_irq, chip);
	}

	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
		goto out;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(bucket->virt_irq, data);

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 */
	data->imap = ~0UL;
	data->iclr = ~0UL;

out:
	return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

	return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
	unsigned long sysino, hv_err;

	BUG_ON(devhandle & ~IMAP_IGN);
	BUG_ON(devino & ~IMAP_INO);

	sysino = devhandle | devino;

	hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
	if (hv_err) {
		prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
			    "err=%lu\n", devhandle, devino, hv_err);
		prom_halt();
	}

	return sun4v_build_common(sysino, &sun4v_virq);
}
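
/* For these cookie based virtual interrupts the sysino is simply the
 * devhandle OR'd with the devino -- the BUG_ON()s above guarantee the two
 * bit fields cannot overlap -- and that same value is registered with the
 * hypervisor as the cookie, so the value delivered with an incoming vector
 * identifies the ivector_table[] slot directly.
 */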

#ifdef CONFIG_PCI_MSI
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
			     unsigned int msi_start, unsigned int msi_end)
{
	struct ino_bucket *bucket;
	struct irq_handler_data *data;
	unsigned long sysino;
	unsigned int devino;

	BUG_ON(tlb_type != hypervisor);

	/* Find a free devino in the given range.  */
	for (devino = msi_start; devino < msi_end; devino++) {
		sysino = sun4v_devino_to_sysino(devhandle, devino);
		bucket = &ivector_table[sysino];
		if (!bucket->virt_irq)
			break;
	}
	if (devino >= msi_end)
		return 0;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	bucket = &ivector_table[sysino];
	bucket->virt_irq = virt_irq_alloc(__irq(bucket));
	*virt_irq_p = bucket->virt_irq;
	set_irq_chip(bucket->virt_irq, &sun4v_msi);

	data = get_irq_chip_data(bucket->virt_irq);
	if (unlikely(data))
		return devino;

	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
	if (unlikely(!data)) {
		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
		prom_halt();
	}
	set_irq_chip_data(bucket->virt_irq, data);

	data->imap = ~0UL;
	data->iclr = ~0UL;

	return devino;
}

void sun4v_destroy_msi(unsigned int virt_irq)
{
	virt_irq_free(virt_irq);
}
#endif

void ack_bad_irq(unsigned int virt_irq)
{
	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
	unsigned int ino = 0xdeadbeef;

	if (bucket)
		ino = bucket - &ivector_table[0];

	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
	       ino, virt_irq);
}

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bucket;
	struct pt_regs *old_regs;

	clear_softint(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();

	/* Sliiiick... */
	bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
	while (bucket) {
		struct ino_bucket *next = __bucket(bucket->irq_chain);

		bucket->irq_chain = 0;
		__do_IRQ(bucket->virt_irq);

		bucket = next;
	}

	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
	unsigned int irq;

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned long flags;

		spin_lock_irqsave(&irq_desc[irq].lock, flags);
		if (irq_desc[irq].action &&
		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
			if (irq_desc[irq].chip->set_affinity)
				irq_desc[irq].chip->set_affinity(irq,
					irq_desc[irq].affinity);
		}
		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}
}
#endif

struct sun5_timer {
	u64	count0;
	u64	limit0;
	u64	count1;
	u64	limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	struct device_node *dp;
	const unsigned int *addr;

	/* PROM timer node hangs out in the top level of device siblings... */
	dp = of_find_node_by_path("/");
	dp = dp->child;
	while (dp) {
		if (!strcmp(dp->name, "counter-timer"))
			break;
		dp = dp->sibling;
	}

	/* Assume that if the node is not present, the PROM uses a different
	 * tick mechanism which we do not need to care about.
	 */
	if (!dp) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it already. */
	addr = of_get_property(dp, "address", NULL);
	if (!addr) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist = 0;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
	unsigned long num_entries = (qmask + 1) / 64;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
			   tb->cpu_mondo_qmask);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
			   tb->dev_mondo_qmask);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
			   tb->resum_qmask);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
			   tb->nonresum_qmask);
}

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	void *p = NULL;

	if (use_bootmem) {
		p = __alloc_bootmem_low(size, size, 0);
	} else {
		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
		if (page)
			p = page_address(page);
	}

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
	unsigned long size = PAGE_ALIGN(qmask + 1);
	unsigned long order = get_order(size);
	void *p = NULL;

	if (use_bootmem) {
		p = __alloc_bootmem_low(size, size, 0);
	} else {
		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
		if (page)
			p = page_address(page);
	}

	if (!p) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(p);
}

static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate and register the mondo and error queues for this cpu.  */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
	struct trap_per_cpu *tb = &trap_block[cpu];

	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);

		init_cpu_send_mondo_info(tb, use_bootmem);
	}

	if (load) {
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}
		sun4v_register_mondo_queues(cpu);
	}
}

static struct irqaction timer_irq_action = {
	.name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQs pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_desc[0].action = &timer_irq_action;
}