path: root/ld/testsuite/ld-elf/implib.s
blob: 0e497aa95d80e0d14e7c5ec1029789c5ad58039a
 .ifndef NO_GLOBAL
  .ifdef HPUX
exported1 .comm	1
  .else
	.comm	exported1,1
  .endif

	.data
	.global	exported2
	.type	exported2, %object
	.size	exported2, 1
exported2:
	.byte	21
 .endif

	.section ".bss", "aw", %nobits
not_exported1:
	.space	1
	.size	not_exported1, 1

	.data
	.type	not_exported2, %object
	.size	not_exported2, 1
not_exported2:
	.byte	42
opt">= opaque; uint32_t saddr; saddr = addr >> 2; DPRINTF("write system reg 0x" TARGET_FMT_plx " = %x\n", addr, val); switch (saddr) { case 2: // clear (enable) // Force clear unused bits val &= MASTER_IRQ_MASK; s->intregm_disabled &= ~val; DPRINTF("Enabled master irq mask %x, curmask %x\n", val, s->intregm_disabled); slavio_check_interrupts(s, 1); break; case 3: // set (disable; doesn't affect pending) // Force clear unused bits val &= MASTER_IRQ_MASK; s->intregm_disabled |= val; slavio_check_interrupts(s, 1); DPRINTF("Disabled master irq mask %x, curmask %x\n", val, s->intregm_disabled); break; case 4: s->target_cpu = val & (MAX_CPUS - 1); slavio_check_interrupts(s, 1); DPRINTF("Set master irq cpu %d\n", s->target_cpu); break; default: break; } } static CPUReadMemoryFunc * const slavio_intctlm_mem_read[3] = { NULL, NULL, slavio_intctlm_mem_readl, }; static CPUWriteMemoryFunc * const slavio_intctlm_mem_write[3] = { NULL, NULL, slavio_intctlm_mem_writel, }; void slavio_pic_info(Monitor *mon, DeviceState *dev) { SysBusDevice *sd; SLAVIO_INTCTLState *s; int i; sd = sysbus_from_qdev(dev); s = FROM_SYSBUS(SLAVIO_INTCTLState, sd); for (i = 0; i < MAX_CPUS; i++) { monitor_printf(mon, "per-cpu %d: pending 0x%08x\n", i, s->slaves[i].intreg_pending); } monitor_printf(mon, "master: pending 0x%08x, disabled 0x%08x\n", s->intregm_pending, s->intregm_disabled); } void slavio_irq_info(Monitor *mon, DeviceState *dev) { #ifndef DEBUG_IRQ_COUNT monitor_printf(mon, "irq statistic code not compiled.\n"); #else SysBusDevice *sd; SLAVIO_INTCTLState *s; int i; int64_t count; sd = sysbus_from_qdev(dev); s = FROM_SYSBUS(SLAVIO_INTCTLState, sd); monitor_printf(mon, "IRQ statistics:\n"); for (i = 0; i < 32; i++) { count = s->irq_count[i]; if (count > 0) monitor_printf(mon, "%2d: %" PRId64 "\n", i, count); } #endif } static const uint32_t intbit_to_level[] = { 2, 3, 5, 7, 9, 11, 13, 2, 3, 5, 7, 9, 11, 13, 12, 12, 6, 13, 4, 10, 8, 9, 11, 0, 0, 0, 0, 15, 15, 15, 15, 0, }; static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs) { uint32_t pending = s->intregm_pending, pil_pending; unsigned int i, j; pending &= ~s->intregm_disabled; DPRINTF("pending %x disabled %x\n", pending, s->intregm_disabled); for (i = 0; i < MAX_CPUS; i++) { pil_pending = 0; /* If we are the current interrupt target, get hard interrupts */ if (pending && !(s->intregm_disabled & MASTER_DISABLE) && (i == s->target_cpu)) { for (j = 0; j < 32; j++) { if ((pending & (1 << j)) && intbit_to_level[j]) { pil_pending |= 1 << intbit_to_level[j]; } } } /* Calculate current pending hard interrupts for display */ s->slaves[i].intreg_pending &= CPU_SOFTIRQ_MASK | CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN; if (i == s->target_cpu) { for (j = 0; j < 32; j++) { if ((s->intregm_pending & (1 << j)) && intbit_to_level[j]) { s->slaves[i].intreg_pending |= 1 << intbit_to_level[j]; } } } /* Level 15 and CPU timer interrupts are only masked when the MASTER_DISABLE bit is set */ if (!(s->intregm_disabled & MASTER_DISABLE)) { pil_pending |= s->slaves[i].intreg_pending & (CPU_IRQ_INT15_IN | CPU_IRQ_TIMER_IN); } /* Add soft interrupts */ pil_pending |= (s->slaves[i].intreg_pending & CPU_SOFTIRQ_MASK) >> 16; if (set_irqs) { for (j = MAX_PILS; j > 0; j--) { if (pil_pending & (1 << j)) { if (!(s->slaves[i].irl_out & (1 << j))) { qemu_irq_raise(s->cpu_irqs[i][j]); } } else { if (s->slaves[i].irl_out & (1 << j)) { qemu_irq_lower(s->cpu_irqs[i][j]); } } } } s->slaves[i].irl_out = pil_pending; } } /* * "irq" here is the bit number in the system interrupt register to * 
separate serial and keyboard interrupts sharing a level. */ static void slavio_set_irq(void *opaque, int irq, int level) { SLAVIO_INTCTLState *s = opaque; uint32_t mask = 1 << irq; uint32_t pil = intbit_to_level[irq]; unsigned int i; DPRINTF("Set cpu %d irq %d -> pil %d level %d\n", s->target_cpu, irq, pil, level); if (pil > 0) { if (level) { #ifdef DEBUG_IRQ_COUNT s->irq_count[pil]++; #endif s->intregm_pending |= mask; if (pil == 15) { for (i = 0; i < MAX_CPUS; i++) { s->slaves[i].intreg_pending |= 1 << pil; } } } else { s->intregm_pending &= ~mask; if (pil == 15) { for (i = 0; i < MAX_CPUS; i++) { s->slaves[i].intreg_pending &= ~(1 << pil); } } } slavio_check_interrupts(s, 1); } } static void slavio_set_timer_irq_cpu(void *opaque, int cpu, int level) { SLAVIO_INTCTLState *s = opaque; DPRINTF("Set cpu %d local timer level %d\n", cpu, level); if (level) { s->slaves[cpu].intreg_pending |= CPU_IRQ_TIMER_IN; } else { s->slaves[cpu].intreg_pending &= ~CPU_IRQ_TIMER_IN; } slavio_check_interrupts(s, 1); } static void slavio_set_irq_all(void *opaque, int irq, int level) { if (irq < 32) { slavio_set_irq(opaque, irq, level); } else { slavio_set_timer_irq_cpu(opaque, irq - 32, level); } } static int vmstate_intctl_post_load(void *opaque, int version_id) { SLAVIO_INTCTLState *s = opaque; slavio_check_interrupts(s, 0); return 0; } static const VMStateDescription vmstate_intctl_cpu = { .name ="slavio_intctl_cpu", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_intctl = { .name ="slavio_intctl", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .post_load = vmstate_intctl_post_load, .fields = (VMStateField []) { VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1, vmstate_intctl_cpu, SLAVIO_CPUINTCTLState), VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState), VMSTATE_UINT32(intregm_disabled, SLAVIO_INTCTLState), VMSTATE_UINT32(target_cpu, SLAVIO_INTCTLState), VMSTATE_END_OF_LIST() } }; static void slavio_intctl_reset(DeviceState *d) { SLAVIO_INTCTLState *s = container_of(d, SLAVIO_INTCTLState, busdev.qdev); int i; for (i = 0; i < MAX_CPUS; i++) { s->slaves[i].intreg_pending = 0; s->slaves[i].irl_out = 0; } s->intregm_disabled = ~MASTER_IRQ_MASK; s->intregm_pending = 0; s->target_cpu = 0; slavio_check_interrupts(s, 0); } static int slavio_intctl_init1(SysBusDevice *dev) { SLAVIO_INTCTLState *s = FROM_SYSBUS(SLAVIO_INTCTLState, dev); int io_memory; unsigned int i, j; qdev_init_gpio_in(&dev->qdev, slavio_set_irq_all, 32 + MAX_CPUS); io_memory = cpu_register_io_memory(slavio_intctlm_mem_read, slavio_intctlm_mem_write, s); sysbus_init_mmio(dev, INTCTLM_SIZE, io_memory); for (i = 0; i < MAX_CPUS; i++) { for (j = 0; j < MAX_PILS; j++) { sysbus_init_irq(dev, &s->cpu_irqs[i][j]); } io_memory = cpu_register_io_memory(slavio_intctl_mem_read, slavio_intctl_mem_write, &s->slaves[i]); sysbus_init_mmio(dev, INTCTL_SIZE, io_memory); s->slaves[i].cpu = i; s->slaves[i].master = s; } return 0; } static SysBusDeviceInfo slavio_intctl_info = { .init = slavio_intctl_init1, .qdev.name = "slavio_intctl", .qdev.size = sizeof(SLAVIO_INTCTLState), .qdev.vmsd = &vmstate_intctl, .qdev.reset = slavio_intctl_reset, }; static void slavio_intctl_register_devices(void) { sysbus_register_withprop(&slavio_intctl_info); } device_init(slavio_intctl_register_devices)