-rw-r--r--  .travis.yml  19
-rw-r--r--  Makefile.main  9
-rw-r--r--  Makefile.rules  4
-rw-r--r--  core/chip.c  5
-rw-r--r--  core/device.c  10
-rw-r--r--  core/hostservices.c  40
-rw-r--r--  core/init.c  4
-rw-r--r--  core/interrupts.c  90
-rw-r--r--  core/opal-msg.c  4
-rw-r--r--  core/opal.c  16
-rw-r--r--  core/pci.c  163
-rw-r--r--  core/test/Makefile.check  1
-rw-r--r--  core/test/run-timebase.c  56
-rw-r--r--  core/test/run-trace.c  9
-rw-r--r--  core/trace.c  6
-rw-r--r--  core/utils.c  3
-rw-r--r--  coverity-model.c  23
-rw-r--r--  doc/device-tree/nvlink.txt  137
-rw-r--r--  doc/nvlink.txt  157
-rw-r--r--  doc/stable-skiboot-rules.txt  62
-rw-r--r--  external/common/arch_flash.h  3
-rw-r--r--  external/common/arch_flash_arm.c  5
-rw-r--r--  external/common/arch_flash_powerpc.c  11
-rw-r--r--  external/common/arch_flash_x86.c  4
-rwxr-xr-x  external/common/get_arch.sh  2
-rw-r--r--  external/common/rules.mk  49
-rw-r--r--  external/gard/.gitignore  9
-rw-r--r--  external/gard/Makefile  63
-rw-r--r--  external/gard/Makefile.dist  10
-rw-r--r--  external/gard/gard.c  182
-rw-r--r--  external/gard/rules.mk  39
-rw-r--r--  external/gard/test/Makefile.check  20
-rw-r--r--  external/gard/test/files/data1.bin  bin 0 -> 20480 bytes
-rwxr-xr-x  external/gard/test/make-check-test  1
-rw-r--r--  external/gard/test/results/00-list.err  0
-rw-r--r--  external/gard/test/results/00-list.out  5
-rw-r--r--  external/gard/test/results/01-show_1.err  0
-rw-r--r--  external/gard/test/results/01-show_1.out  9
-rw-r--r--  external/gard/test/results/02-usage.err  18
-rw-r--r--  external/gard/test/results/02-usage.out  1
-rwxr-xr-x  external/gard/test/test-gard  5
-rw-r--r--  external/gard/test/tests/00-list  10
-rw-r--r--  external/gard/test/tests/01-show_1  10
-rw-r--r--  external/gard/test/tests/02-usage  12
-rw-r--r--  external/mambo/mambo_utils.tcl  150
-rw-r--r--  external/mambo/skiboot.tcl  40
-rw-r--r--  external/opal-prd/Makefile  27
-rw-r--r--  external/opal-prd/opal-prd.c  32
-rw-r--r--  external/opal-prd/opal-prd.service  10
-rw-r--r--  external/opal-prd/opal-prd.socket  8
-rw-r--r--  external/opal-prd/pnor.c  211
-rw-r--r--  external/opal-prd/pnor.h  2
-rw-r--r--  external/opal-prd/test/test_pnor.c  10
-rw-r--r--  external/pflash/.gitignore  5
-rw-r--r--  external/pflash/Makefile  28
-rw-r--r--  external/pflash/ast.h  67
-rwxr-xr-x  external/pflash/build-all-arch.sh  35
-rw-r--r--  external/pflash/pflash.c  3
-rw-r--r--  external/pflash/rules.mk  23
-rw-r--r--  external/shared/Makefile  57
-rw-r--r--  external/shared/config.h  19
-rwxr-xr-x  external/test/test.sh  101
-rw-r--r--  hdata/hdif.c  2
-rw-r--r--  hdata/hdif.h  2
-rw-r--r--  hdata/iohub.c  276
-rw-r--r--  hdata/memory.c  9
-rw-r--r--  hdata/paca.c  2
-rw-r--r--  hdata/spira.c  4
-rw-r--r--  hdata/spira.h  1
-rw-r--r--  hw/Makefile.inc  4
-rw-r--r--  hw/ast-bmc/ast-sf-ctrl.c  10
-rw-r--r--  hw/bt.c  224
-rw-r--r--  hw/cec.c  12
-rw-r--r--  hw/chiptod.c  219
-rw-r--r--  hw/fake-rtc.c  59
-rw-r--r--  hw/fsp/fsp-attn.c  2
-rw-r--r--  hw/fsp/fsp-chiptod.c  13
-rw-r--r--  hw/fsp/fsp-dpo.c  62
-rw-r--r--  hw/fsp/fsp-dump.c  2
-rw-r--r--  hw/fsp/fsp-leds.c  2
-rw-r--r--  hw/fsp/fsp-mdst-table.c  23
-rw-r--r--  hw/fsp/fsp-mem-err.c  26
-rw-r--r--  hw/fsp/fsp-sensor.c  16
-rw-r--r--  hw/ipmi/ipmi-rtc.c  8
-rw-r--r--  hw/ipmi/ipmi-sel.c  2
-rw-r--r--  hw/ipmi/ipmi-sensor.c  5
-rw-r--r--  hw/lpc-rtc.c  2
-rw-r--r--  hw/lpc-uart.c  2
-rw-r--r--  hw/lpc.c  11
-rw-r--r--  hw/npu-hw-procedures.c  602
-rw-r--r--  hw/npu.c  1825
-rw-r--r--  hw/nx-rng.c  2
-rw-r--r--  hw/occ.c  72
-rw-r--r--  hw/p5ioc2-phb.c  1206
-rw-r--r--  hw/p5ioc2.c  298
-rw-r--r--  hw/p7ioc-inits.c  590
-rw-r--r--  hw/p7ioc-phb.c  186
-rw-r--r--  hw/p7ioc.c  55
-rw-r--r--  hw/p8-i2c.c  3
-rw-r--r--  hw/phb3.c  175
-rw-r--r--  hw/psi.c  2
-rw-r--r--  hw/slw.c  41
-rw-r--r--  hw/xscom.c  2
-rw-r--r--  include/ast.h  1
-rw-r--r--  include/bitutils.h  3
-rw-r--r--  include/cec.h  4
-rw-r--r--  include/chip.h  6
-rw-r--r--  include/fsp-mdst-table.h  6
-rw-r--r--  include/hostservices.h  3
-rw-r--r--  include/interrupts.h  42
-rw-r--r--  include/ipmi.h  1
-rw-r--r--  include/npu-regs.h  235
-rw-r--r--  include/npu.h  214
-rw-r--r--  include/opal-internal.h  10
-rw-r--r--  include/p5ioc2-regs.h  234
-rw-r--r--  include/p5ioc2.h  184
-rw-r--r--  include/pci-cfg.h  13
-rw-r--r--  include/pci.h  45
-rw-r--r--  include/phb3.h  4
-rw-r--r--  include/skiboot.h  4
-rw-r--r--  include/types.h  7
-rw-r--r--  libc/stdio/Makefile.inc  6
-rw-r--r--  libc/stdio/fscanf.c  26
-rw-r--r--  libc/stdio/scanf.c  26
-rw-r--r--  libc/stdio/vfscanf.c  269
-rw-r--r--  libc/stdio/vsscanf.c  131
-rw-r--r--  libfdt/fdt_rw.c  4
-rw-r--r--  libflash/blocklevel.c  70
-rw-r--r--  libflash/blocklevel.h  4
-rw-r--r--  libflash/file.c  94
-rw-r--r--  libflash/file.h  4
-rw-r--r--  libflash/libffs.c  124
-rw-r--r--  libflash/libffs.h  31
-rw-r--r--  libflash/libflash.c  10
-rw-r--r--  libpore/sbe_xip_image.c  7
-rwxr-xr-x  opal-ci/build-qemu-powernv.sh  8
-rwxr-xr-x  opal-ci/fetch-debian-jessie-installer.sh  3
-rwxr-xr-x  opal-ci/install-deps-qemu-powernv.sh  5
-rw-r--r--  platforms/astbmc/garrison.c  61
-rw-r--r--  skiboot.spec  115
-rw-r--r--  test/Makefile.check  12
-rw-r--r--  test/hello_world/Makefile.check  11
-rwxr-xr-x  test/hello_world/run_mambo_hello_world.sh (renamed from test/hello_world/run_hello_world.sh)  0
-rwxr-xr-x  test/hello_world/run_qemu_hello_world.sh  45
-rwxr-xr-x  test/run_mambo_boot_test.sh (renamed from test/run_boot_test.sh)  0
-rwxr-xr-x  test/run_qemu-jessie-debian-installer_boot_test.sh  67
-rwxr-xr-x  test/run_qemu_boot_test.sh  60
147 files changed, 6151 insertions, 4231 deletions
diff --git a/.travis.yml b/.travis.yml
index b9f11cc..5934e88 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,15 +1,20 @@
language: c
+sudo: required
+dist: trusty
+
before_install:
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
- sudo apt-get update -qq
- sudo apt-get install -y gcc-4.8 libstdc++6 valgrind expect xterm
+ - sudo apt-get install -y gcc-arm-linux-gnueabi gcc-powerpc64le-linux-gnu gcc
- sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 50
- wget https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.8.0/x86_64-gcc-4.8.0-nolibc_powerpc64-linux.tar.xz
- sudo mkdir /opt/cross
- sudo tar -C /opt/cross -xvf x86_64-gcc-4.8.0-nolibc_powerpc64-linux.tar.xz
- wget ftp://public.dhe.ibm.com/software/server/powerfuncsim/p8/packages/v1.0-2/systemsim-p8_1.0-2_amd64.deb
- sudo dpkg -i systemsim-p8_1.0-2_amd64.deb
+ - ./opal-ci/install-deps-qemu-powernv.sh
env:
global:
@@ -19,8 +24,16 @@ env:
# via the "travis encrypt" command using the project repo's public key
- secure: "MpNEGFa1VrF/vsQq24n5UgfRbz1wVC6B8mubFnyK4gX0IuQ9xhWuTzMLUQF9UJxe5jnC2DTmVUvYTYN/hggw+PpYwbOOAE0QGR5pmPHA4PSRmc5pxt1q18/sv7EPFw66GFyWJq94nWjpigyKQ8KGtA67j1xFqrDoS43OA76WZgo="
+before_script:
+ - mkdir -p external/opal-prd/asm
+ - wget https://raw.githubusercontent.com/open-power/linux/master/arch/powerpc/include/uapi/asm/opal-prd.h -O external/opal-prd/asm/opal-prd.h
+
script:
- - make -j4 all check ; (make clean; cd external/gard && make) ; (make clean; cd external/pflash && make)
+ - (cd opal-ci; ./build-qemu-powernv.sh)
+ - ./opal-ci/fetch-debian-jessie-installer.sh
+ - make -j4 all check ; (make clean; cd external/gard && make)
+ - (cd external/pflash; ./build-all-arch.sh)
+ - (cd external/opal-prd && make clean && make test && make clean && CROSS_COMPILE=powerpc64le-linux-gnu- make)
- make clean && SKIBOOT_GCOV=1 make && SKIBOOT_GCOV=1 make check
- make clean && rm -rf builddir && mkdir builddir && make SRC=`pwd` -f ../Makefile -C builddir
- make clean
@@ -31,6 +44,6 @@ addons:
name: "open-power/skiboot"
description: "Build submitted via Travis CI"
notification_email: stewart@linux.vnet.ibm.com
- build_command_prepend: "make clean; cov-configure --comptype gcc --compiler powerpc64-linux-gcc --template"
- build_command: "make -j4 all check gard"
+ build_command_prepend: "make clean; cov-configure --comptype gcc --compiler powerpc64-linux-gcc --template; cov-configure --comptype gcc --compiler powerpc64le-linux-gnu-gcc; cov-configure --comptype gcc --compiler arm-linux-gnueabi-gcc; cov-configure --comptype gcc --compiler x86_64-linux-gnu-gcc"
+ build_command: "make -j4 all check gard pflash-coverity"
branch_pattern: coverity_scan
diff --git a/Makefile.main b/Makefile.main
index 34591be..ea5fc8e 100644
--- a/Makefile.main
+++ b/Makefile.main
@@ -73,8 +73,8 @@ ifeq ($(STACK_CHECK),1)
CFLAGS += -fstack-protector-all -pg
CPPFLAGS += -DSTACK_CHECK_ENABLED
else
-# XXX Add -fstack-protector-strong on gcc 4.9
CFLAGS += -fstack-protector
+CFLAGS += $(call try-cflag,$(CC),-fstack-protector-strong)
endif
CFLAGS += $(call try-cflag,$(CC),-Wjump-misses-init) \
@@ -119,6 +119,7 @@ endif
CHECK = sparse
CHECKFLAGS := $(CF)
+CHECK_CFLAGS_SKIP = -std=gnu11
.SECONDARY:
@@ -149,6 +150,12 @@ include $(SRC)/$(DEVSRC)/Makefile.inc
gard:
(cd external/gard; make)
+pflash:
+ (cd external/pflash; make)
+
+pflash-coverity:
+ (cd external/pflash; ./build-all-arch.sh)
+
all: $(SUBDIRS) $(TARGET).lid $(TARGET).map extract-gcov
OBJS := $(ASM) $(CORE) $(HW) $(PLATFORMS) $(LIBFDT) $(LIBFLASH)
diff --git a/Makefile.rules b/Makefile.rules
index 3534f5d..ea52488 100644
--- a/Makefile.rules
+++ b/Makefile.rules
@@ -35,9 +35,9 @@ endef
ifeq ($(C),1)
ifeq ($(VERBOSE),1)
- cmd_check = $(CHECK) $(CHECKFLAGS) $(call cook_cflags,$@) $<
+ cmd_check = $(CHECK) $(CHECKFLAGS) $(filter-out $(CHECK_CFLAGS_SKIP),$(call cook_cflags,$@)) $<
else
- cmd_check = @$(CHECK) $(CHECKFLAGS) $(call cook_cflags,$@) $<
+ cmd_check = @$(CHECK) $(CHECKFLAGS) $(filter-out $(CHECK_CFLAGS_SKIP),$(call cook_cflags,$@)) $<
endif
endif
diff --git a/core/chip.c b/core/chip.c
index f2f1a96..729bccb 100644
--- a/core/chip.c
+++ b/core/chip.c
@@ -96,6 +96,11 @@ void init_chips(void)
0xffffffff);
chip->pcid = dt_prop_get_u32_def(xn, "ibm,proc-chip-id",
0xffffffff);
+ if (dt_prop_get_u32_def(xn, "ibm,occ-functional-state", 1))
+ chip->occ_functional = true;
+ else
+ chip->occ_functional = false;
+
list_head_init(&chip->i2cms);
list_head_init(&chip->lpc_clients);
};
diff --git a/core/device.c b/core/device.c
index ba983c7..4818d40 100644
--- a/core/device.c
+++ b/core/device.c
@@ -366,14 +366,20 @@ static struct dt_property *new_property(struct dt_node *node,
const char *name, size_t size)
{
struct dt_property *p = malloc(sizeof(*p) + size);
+ char *path;
+
if (!p) {
+ path = dt_get_path(node);
prerror("Failed to allocate property \"%s\" for %s of %zu bytes\n",
- name, dt_get_path(node), size);
+ name, path, size);
+ free(path);
abort();
}
if (dt_find_property(node, name)) {
+ path = dt_get_path(node);
prerror("Duplicate property \"%s\" in node %s\n",
- name, dt_get_path(node));
+ name, path);
+ free(path);
abort();
}
diff --git a/core/hostservices.c b/core/hostservices.c
index 815654d..672b57f 100644
--- a/core/hostservices.c
+++ b/core/hostservices.c
@@ -826,9 +826,8 @@ int host_services_occ_start(void)
int host_services_occ_stop(void)
{
- struct proc_chip *chip;
- int i, rc = 0, nr_chips=0;
- uint64_t chipids[MAX_CHIPS];
+ int i, rc = 0, nr_slaves = 0, nr_masters = 0;
+ uint64_t *master_chipids = NULL, *slave_chipids = NULL;
prlog(PR_INFO, "HBRT: OCC Stop requested\n");
@@ -837,22 +836,39 @@ int host_services_occ_stop(void)
return -ENOENT;
}
- for_each_chip(chip) {
- chipids[nr_chips++] = chip->id;
- }
+ rc = find_master_and_slave_occ(&master_chipids, &slave_chipids,
+ &nr_masters, &nr_slaves);
+ if (rc)
+ goto out;
- for (i = 0; i < nr_chips; i++)
+ for (i = 0; i < nr_slaves; i++)
prlog(PR_TRACE, "HBRT: Calling stopOCC() for %04llx ",
- chipids[i]);
+ slave_chipids[i]);
+
+ if (!nr_slaves)
+ goto master;
+
+ /* Lets STOP all the slave OCC */
+ rc = hservice_runtime->stopOCCs(slave_chipids, nr_slaves);
+ prlog(PR_DEBUG, "HBRT: stopOCCs() slave rc = %d\n", rc);
+
+master:
+ for (i = 0; i < nr_masters; i++)
+ prlog(PR_TRACE, "HBRT: Calling stopOCC() for %04llx ",
+ master_chipids[i]);
+
+ /* Lets STOP all the master OCC */
+ rc = hservice_runtime->stopOCCs(master_chipids, nr_masters);
- /* Lets STOP all OCC */
- rc = hservice_runtime->stopOCCs(chipids, nr_chips);
hservice_mark();
- prlog(PR_DEBUG, "HBRT: stopOCCs() rc = %d\n", rc);
+ prlog(PR_DEBUG, "HBRT: stopOCCs() master rc = %d\n", rc);
+
+out:
+ free(master_chipids);
+ free(slave_chipids);
return rc;
}
-
void host_services_occ_base_setup(void)
{
struct proc_chip *chip;
diff --git a/core/init.c b/core/init.c
index 7ae4dee..54a5735 100644
--- a/core/init.c
+++ b/core/init.c
@@ -734,12 +734,14 @@ void __noreturn main_cpu_entry(const void *fdt, u32 master_cpu)
opal_init_msg();
/* Probe IO hubs */
- probe_p5ioc2();
probe_p7ioc();
/* Probe PHB3 on P8 */
probe_phb3();
+ /* Probe NPUs */
+ probe_npu();
+
/* Initialize PCI */
pci_init_slots();
diff --git a/core/interrupts.c b/core/interrupts.c
index 32f43ef..aafdea9 100644
--- a/core/interrupts.c
+++ b/core/interrupts.c
@@ -15,6 +15,7 @@
*/
#include <skiboot.h>
+#include <chip.h>
#include <cpu.h>
#include <fsp.h>
#include <interrupts.h>
@@ -121,7 +122,7 @@ uint32_t get_psi_interrupt(uint32_t chip_id)
irq |= P7_PSI_IRQ_BUID << 4;
break;
case proc_gen_p8:
- irq = P8_CHIP_IRQ_BLOCK_BASE(chip_id, P8_IRQ_BLOCK_MISC);
+ irq = p8_chip_irq_block_base(chip_id, P8_IRQ_BLOCK_MISC);
irq += P8_IRQ_MISC_PSI_BASE;
break;
default:
@@ -253,6 +254,89 @@ void icp_kick_cpu(struct cpu_thread *cpu)
out_8(icp + ICP_MFRR, 0);
}
+/* Returns the number of chip ID bits used for interrupt numbers */
+static uint32_t p8_chip_id_bits(uint32_t chip)
+{
+ struct proc_chip *proc_chip = get_chip(chip);
+
+ assert(proc_chip);
+ switch (proc_chip->type) {
+ case PROC_CHIP_P8_MURANO:
+ case PROC_CHIP_P8_VENICE:
+ return 6;
+ break;
+
+ case PROC_CHIP_P8_NAPLES:
+ return 5;
+ break;
+
+ default:
+ /* This shouldn't be called on non-P8 based systems */
+ assert(0);
+ return 0;
+ break;
+ }
+}
+
+/* The chip id mask is the upper p8_chip_id_bits of the irq number */
+static uint32_t chip_id_mask(uint32_t chip)
+{
+ uint32_t chip_id_bits = p8_chip_id_bits(chip);
+ uint32_t chip_id_mask;
+
+ chip_id_mask = ((1 << chip_id_bits) - 1);
+ chip_id_mask <<= P8_IRQ_BITS - chip_id_bits;
+ return chip_id_mask;
+}
+
+/* The block mask is what remains of the 19 bit irq number after
+ * removing the upper 5 or 6 bits for the chip# and the lower 11 bits
+ * for the number of bits per block. */
+static uint32_t block_mask(uint32_t chip)
+{
+ uint32_t chip_id_bits = p8_chip_id_bits(chip);
+ uint32_t irq_block_mask;
+
+ irq_block_mask = P8_IRQ_BITS - chip_id_bits - P8_IVE_BITS;
+ irq_block_mask = ((1 << irq_block_mask) - 1) << P8_IVE_BITS;
+ return irq_block_mask;
+}
+
+uint32_t p8_chip_irq_block_base(uint32_t chip, uint32_t block)
+{
+ uint32_t irq;
+
+ assert(chip < (1 << p8_chip_id_bits(chip)));
+ irq = SETFIELD(chip_id_mask(chip), 0, chip);
+ irq = SETFIELD(block_mask(chip), irq, block);
+
+ return irq;
+}
+
+uint32_t p8_chip_irq_phb_base(uint32_t chip, uint32_t phb)
+{
+ assert(chip < (1 << p8_chip_id_bits(chip)));
+
+ return p8_chip_irq_block_base(chip, phb + P8_IRQ_BLOCK_PHB_BASE);
+}
+
+uint32_t p8_irq_to_chip(uint32_t irq)
+{
+ /* This assumes we only have one type of cpu in a system,
+ * which should be ok. */
+ return GETFIELD(chip_id_mask(this_cpu()->chip_id), irq);
+}
+
+uint32_t p8_irq_to_block(uint32_t irq)
+{
+ return GETFIELD(block_mask(this_cpu()->chip_id), irq);
+}
+
+uint32_t p8_irq_to_phb(uint32_t irq)
+{
+ return p8_irq_to_block(irq) - P8_IRQ_BLOCK_PHB_BASE;
+}
+
static struct irq_source *irq_find_source(uint32_t isn)
{
struct irq_source *is;
@@ -291,7 +375,7 @@ static int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority)
}
opal_call(OPAL_GET_XIVE, opal_get_xive, 3);
-static int64_t opal_handle_interrupt(uint32_t isn, uint64_t *outstanding_event_mask)
+static int64_t opal_handle_interrupt(uint32_t isn, __be64 *outstanding_event_mask)
{
struct irq_source *is = irq_find_source(isn);
int64_t rc = OPAL_SUCCESS;
@@ -312,7 +396,7 @@ static int64_t opal_handle_interrupt(uint32_t isn, uint64_t *outstanding_event_m
/* Update output events */
bail:
if (outstanding_event_mask)
- *outstanding_event_mask = opal_pending_events;
+ *outstanding_event_mask = cpu_to_be64(opal_pending_events);
return rc;
}
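
For illustration only (not part of the patch, and assuming the 19-bit interrupt
number and 11 low IVE bits described in the comment above), the new helpers lay
out a P8 interrupt number as:

     Venice / Murano (6 chip-id bits)          Naples (5 chip-id bits)

      18         13 12 11 10        0           18        14 13  11 10        0
     +-------------+-----+-----------+         +------------+------+-----------+
     |   chip id   |block|    IVE    |         |  chip id   |block |    IVE    |
     +-------------+-----+-----------+         +------------+------+-----------+

so p8_chip_irq_block_base(chip, block) works out to (chip << 13) | (block << 11)
on Venice/Murano and (chip << 14) | (block << 11) on Naples, leaving the low 11
bits for the per-block interrupt number.
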
diff --git a/core/opal-msg.c b/core/opal-msg.c
index b0d8aaf..4a7cddb 100644
--- a/core/opal-msg.c
+++ b/core/opal-msg.c
@@ -55,7 +55,7 @@ int _opal_queue_msg(enum opal_msg_type msg_type, void *data,
entry->consumed = consumed;
entry->data = data;
- entry->msg.msg_type = msg_type;
+ entry->msg.msg_type = cpu_to_be32(msg_type);
if (num_params > ARRAY_SIZE(entry->msg.params)) {
prerror("Discarding extra parameters\n");
@@ -117,7 +117,7 @@ static int64_t opal_check_completion(uint64_t *buffer, uint64_t size,
lock(&opal_msg_lock);
list_for_each_safe(&msg_pending_list, entry, next_entry, link) {
if (entry->msg.msg_type == OPAL_MSG_ASYNC_COMP &&
- entry->msg.params[0] == token) {
+ be64_to_cpu(entry->msg.params[0]) == token) {
list_del(&entry->link);
callback = entry->consumed;
data = entry->data;
diff --git a/core/opal.c b/core/opal.c
index c2c4d8d..b6411f0 100644
--- a/core/opal.c
+++ b/core/opal.c
@@ -81,7 +81,7 @@ void opal_trace_entry(struct stack_frame *eframe);
void opal_trace_entry(struct stack_frame *eframe)
{
union trace t;
- unsigned nargs;
+ unsigned nargs, i;
if (this_cpu()->pir != mfspr(SPR_PIR)) {
printf("CPU MISMATCH ! PIR=%04lx cpu @%p -> pir=%04x\n",
@@ -93,10 +93,11 @@ void opal_trace_entry(struct stack_frame *eframe)
else
nargs = opal_num_args[eframe->gpr[0]];
- t.opal.token = eframe->gpr[0];
- t.opal.lr = eframe->lr;
- t.opal.sp = eframe->gpr[1];
- memcpy(t.opal.r3_to_11, &eframe->gpr[3], nargs*sizeof(u64));
+ t.opal.token = cpu_to_be64(eframe->gpr[0]);
+ t.opal.lr = cpu_to_be64(eframe->lr);
+ t.opal.sp = cpu_to_be64(eframe->gpr[1]);
+ for(i=0; i<nargs; i++)
+ t.opal.r3_to_11[i] = cpu_to_be64(eframe->gpr[3+i]);
trace_add(&t, TRACE_OPAL, offsetof(struct trace_opal, r3_to_11[nargs]));
}
@@ -160,6 +161,7 @@ void add_opal_node(void)
add_opal_firmware_node();
add_associativity_ref_point();
memcons_add_properties();
+ add_cpu_idle_state_properties();
}
static struct lock evt_lock = LOCK_UNLOCKED;
@@ -317,7 +319,7 @@ void opal_run_pollers(void)
check_stacks();
}
-static int64_t opal_poll_events(uint64_t *outstanding_event_mask)
+static int64_t opal_poll_events(__be64 *outstanding_event_mask)
{
/* Check if we need to trigger an attn for test use */
if (attn_trigger == 0xdeadbeef) {
@@ -334,7 +336,7 @@ static int64_t opal_poll_events(uint64_t *outstanding_event_mask)
opal_run_pollers();
if (outstanding_event_mask)
- *outstanding_event_mask = opal_pending_events;
+ *outstanding_event_mask = cpu_to_be64(opal_pending_events);
return OPAL_SUCCESS;
}
diff --git a/core/pci.c b/core/pci.c
index 6cfb3cb..03d5a35 100644
--- a/core/pci.c
+++ b/core/pci.c
@@ -22,27 +22,26 @@
#include <device.h>
#include <fsp.h>
-/* The eeh event code will need updating if this is ever increased to
- * support more than 64 phbs */
-static struct phb *phbs[64];
+#define MAX_PHB_ID 256
+static struct phb *phbs[MAX_PHB_ID];
#define PCITRACE(_p, _bdfn, fmt, a...) \
- prlog(PR_TRACE, "PHB%d:%02x:%02x.%x " fmt, \
+ prlog(PR_TRACE, "PHB#%04x:%02x:%02x.%x " fmt, \
(_p)->opal_id, \
((_bdfn) >> 8) & 0xff, \
((_bdfn) >> 3) & 0x1f, (_bdfn) & 0x7, ## a)
#define PCIDBG(_p, _bdfn, fmt, a...) \
- prlog(PR_DEBUG, "PHB%d:%02x:%02x.%x " fmt, \
+ prlog(PR_DEBUG, "PHB#%04x:%02x:%02x.%x " fmt, \
(_p)->opal_id, \
((_bdfn) >> 8) & 0xff, \
((_bdfn) >> 3) & 0x1f, (_bdfn) & 0x7, ## a)
#define PCINOTICE(_p, _bdfn, fmt, a...) \
- prlog(PR_NOTICE, "PHB%d:%02x:%02x.%x " fmt, \
+ prlog(PR_NOTICE, "PHB#%04x:%02x:%02x.%x " fmt, \
(_p)->opal_id, \
((_bdfn) >> 8) & 0xff, \
((_bdfn) >> 3) & 0x1f, (_bdfn) & 0x7, ## a)
#define PCIERR(_p, _bdfn, fmt, a...) \
- prlog(PR_ERR, "PHB%d:%02x:%02x.%x " fmt, \
+ prlog(PR_ERR, "PHB#%04x:%02x:%02x.%x " fmt, \
(_p)->opal_id, \
((_bdfn) >> 8) & 0xff, \
((_bdfn) >> 3) & 0x1f, (_bdfn) & 0x7, ## a)
@@ -162,7 +161,13 @@ static struct pci_device *pci_scan_one(struct phb *phb, struct pci_device *paren
goto fail;
}
pd->bdfn = bdfn;
+ pd->vdid = vdid;
+ pci_cfg_read32(phb, bdfn, PCI_CFG_SUBSYS_VENDOR_ID, &pd->sub_vdid);
+ pci_cfg_read32(phb, bdfn, PCI_CFG_REV_ID, &pd->class);
+ pd->class >>= 8;
+
pd->parent = parent;
+ list_head_init(&pd->pcrf);
list_head_init(&pd->children);
rc = pci_cfg_read8(phb, bdfn, PCI_CFG_HDR_TYPE, &htype);
if (rc) {
@@ -536,7 +541,7 @@ static uint8_t pci_scan(struct phb *phb, uint8_t bus, uint8_t max_bus,
* bridge (when we need to give aligned powers of two's
* on P7IOC). If is is set to false, we just adjust the
* subordinate bus number based on what we probed.
- *
+ *
*/
max_bus = save_max;
next_bus = phb->ops->choose_bus(phb, pd, next_bus,
@@ -787,28 +792,38 @@ static void pci_scan_phb(void *data)
pci_walk_dev(phb, pci_configure_mps, NULL);
}
-int64_t pci_register_phb(struct phb *phb)
+int64_t pci_register_phb(struct phb *phb, int opal_id)
{
- int64_t rc = OPAL_SUCCESS;
- unsigned int i;
-
- /* This is called at init time in non-concurrent way, so no lock needed */
- for (i = 0; i < ARRAY_SIZE(phbs); i++)
- if (!phbs[i])
- break;
- if (i >= ARRAY_SIZE(phbs)) {
- prerror("PHB: Failed to find a free ID slot\n");
- rc = OPAL_RESOURCE;
+ /* The user didn't specify an opal_id, allocate one */
+ if (opal_id == OPAL_DYNAMIC_PHB_ID) {
+ /* This is called at init time in non-concurrent way, so no lock needed */
+ for (opal_id = 0; opal_id < ARRAY_SIZE(phbs); opal_id++)
+ if (!phbs[opal_id])
+ break;
+ if (opal_id >= ARRAY_SIZE(phbs)) {
+ prerror("PHB: Failed to find a free ID slot\n");
+ return OPAL_RESOURCE;
+ }
} else {
- phbs[i] = phb;
- phb->opal_id = i;
- dt_add_property_cells(phb->dt_node, "ibm,opal-phbid",
- 0, phb->opal_id);
- PCIDBG(phb, 0, "PCI: Registered PHB\n");
+ if (opal_id >= ARRAY_SIZE(phbs)) {
+ prerror("PHB: ID %d out of range !\n", opal_id);
+ return OPAL_PARAMETER;
+ }
+ /* The user did specify an opal_id, check it's free */
+ if (phbs[opal_id]) {
+ prerror("PHB: Duplicate registration of ID %d\n", opal_id);
+ return OPAL_PARAMETER;
+ }
}
+
+ phbs[opal_id] = phb;
+ phb->opal_id = opal_id;
+ dt_add_property_cells(phb->dt_node, "ibm,opal-phbid", 0, phb->opal_id);
+ PCIDBG(phb, 0, "PCI: Registered PHB\n");
+
list_head_init(&phb->devices);
- return rc;
+ return OPAL_SUCCESS;
}
int64_t pci_unregister_phb(struct phb *phb)
@@ -1141,6 +1156,8 @@ static void pci_add_slot_properties(struct phb *phb, struct pci_slot_info *info,
char loc_code[LOC_CODE_SIZE];
size_t base_loc_code_len = 0, slot_label_len = 0;
+ loc_code[0] = '\0';
+
if (phb->base_loc_code) {
base_loc_code_len = strlen(phb->base_loc_code);
strcpy(loc_code, phb->base_loc_code);
@@ -1188,10 +1205,15 @@ static void pci_add_loc_code(struct dt_node *np, struct pci_device *pd)
uint8_t class, sub;
uint8_t pos, len;
- /* Look for a parent with a slot-location-code */
- while (p && !blcode) {
- blcode = dt_prop_get_def(p, "ibm,slot-location-code", NULL);
- p = p->parent;
+ /* If there is a label assigned to the function, use it on openpower machines */
+ if (pd->slot_info && strlen(pd->slot_info->label) && !fsp_present()) {
+ blcode = pd->slot_info->label;
+ } else {
+ /* Look for a parent with a slot-location-code */
+ while (p && !blcode) {
+ blcode = dt_prop_get_def(p, "ibm,slot-location-code", NULL);
+ p = p->parent;
+ }
}
if (!blcode)
return;
@@ -1320,7 +1342,7 @@ static void pci_add_one_node(struct phb *phb, struct pci_device *pd,
else
snprintf(name, MAX_NAME - 1, "%s@%x",
cname, (pd->bdfn >> 3) & 0x1f);
- np = dt_new(parent_node, name);
+ pd->dn = np = dt_new(parent_node, name);
/* XXX FIXME: make proper "compatible" properties */
if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false)) {
@@ -1349,8 +1371,8 @@ static void pci_add_one_node(struct phb *phb, struct pci_device *pd,
* - ...
*/
- /* Add slot properties if needed */
- if (pd->slot_info)
+ /* Add slot properties if needed and iff this is a bridge */
+ if (pd->slot_info && pd->is_bridge)
pci_add_slot_properties(phb, pd->slot_info, np);
/* Make up location code */
@@ -1366,7 +1388,6 @@ static void pci_add_one_node(struct phb *phb, struct pci_device *pd,
/* Print summary info about the device */
pci_print_summary_line(phb, pd, np, rev_class, cname);
-
if (!pd->is_bridge)
return;
@@ -1418,6 +1439,17 @@ static void pci_add_nodes(struct phb *phb)
pci_add_one_node(phb, pd, phb->dt_node, lstate, 0);
}
+static void pci_fixup_nodes(struct phb *phb)
+{
+ struct pci_device *pd;
+
+ if (!phb->ops->device_node_fixup)
+ return;
+
+ list_for_each(&phb->devices, pd, link)
+ phb->ops->device_node_fixup(phb, pd);
+}
+
static void __pci_reset(struct list_head *list)
{
struct pci_device *pd;
@@ -1448,9 +1480,11 @@ void pci_reset(void)
static void pci_do_jobs(void (*fn)(void *))
{
- void *jobs[ARRAY_SIZE(phbs)];
+ struct cpu_job **jobs;
int i;
+ jobs = zalloc(sizeof(struct cpu_job *) * ARRAY_SIZE(phbs));
+ assert(jobs);
for (i = 0; i < ARRAY_SIZE(phbs); i++) {
if (!phbs[i]) {
jobs[i] = NULL;
@@ -1473,6 +1507,7 @@ static void pci_do_jobs(void (*fn)(void *))
cpu_wait_job(jobs[i], true);
}
+ free(jobs);
}
void pci_init_slots(void)
@@ -1495,6 +1530,14 @@ void pci_init_slots(void)
continue;
pci_add_nodes(phbs[i]);
}
+
+ /* Do device node fixups now that all the devices have been
+ * added to the device tree. */
+ for (i = 0; i < ARRAY_SIZE(phbs); i++) {
+ if (!phbs[i])
+ continue;
+ pci_fixup_nodes(phbs[i]);
+ }
}
/*
@@ -1576,3 +1619,55 @@ void pci_restore_bridge_buses(struct phb *phb)
{
pci_walk_dev(phb, __pci_restore_bridge_buses, NULL);
}
+
+struct pci_cfg_reg_filter *pci_find_cfg_reg_filter(struct pci_device *pd,
+ uint32_t start, uint32_t len)
+{
+ struct pci_cfg_reg_filter *pcrf;
+
+ /* Check on the cached range, which contains holes */
+ if ((start + len) <= pd->pcrf_start ||
+ pd->pcrf_end <= start)
+ return NULL;
+
+ list_for_each(&pd->pcrf, pcrf, link) {
+ if (start >= pcrf->start &&
+ (start + len) <= (pcrf->start + pcrf->len))
+ return pcrf;
+ }
+
+ return NULL;
+}
+
+struct pci_cfg_reg_filter *pci_add_cfg_reg_filter(struct pci_device *pd,
+ uint32_t start, uint32_t len,
+ uint32_t flags,
+ pci_cfg_reg_func func)
+{
+ struct pci_cfg_reg_filter *pcrf;
+
+ pcrf = pci_find_cfg_reg_filter(pd, start, len);
+ if (pcrf)
+ return pcrf;
+
+ pcrf = zalloc(sizeof(*pcrf) + ((len + 0x4) & ~0x3));
+ if (!pcrf)
+ return NULL;
+
+ /* Don't validate the flags so that the private flags
+ * can be supported for debugging purpose.
+ */
+ pcrf->flags = flags;
+ pcrf->start = start;
+ pcrf->len = len;
+ pcrf->func = func;
+ pcrf->data = (uint8_t *)(pcrf + 1);
+
+ if (start < pd->pcrf_start)
+ pd->pcrf_start = start;
+ if (pd->pcrf_end < (start + len))
+ pd->pcrf_end = start + len;
+ list_add_tail(&pd->pcrf, &pcrf->link);
+
+ return pcrf;
+}
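
A quick sketch of how the reworked registration interface is used (illustration
only, not code from this patch; the probe function and its arguments are made
up, and the usual skiboot headers are assumed): existing backends keep the old
behaviour by passing OPAL_DYNAMIC_PHB_ID, while a backend that wants a
well-known ID can now ask for one and must handle the new error returns.

    /* Hypothetical backend probe code, for illustration. */
    static void example_phb_probe(struct phb *phb, bool want_fixed_id, int wanted_id)
    {
            int64_t rc;

            if (want_fixed_id)
                    /* New: claim a specific slot; OPAL_PARAMETER means the ID
                     * is out of range or already registered. */
                    rc = pci_register_phb(phb, wanted_id);
            else
                    /* Old behaviour: let core/pci.c pick the next free ID. */
                    rc = pci_register_phb(phb, OPAL_DYNAMIC_PHB_ID);

            if (rc != OPAL_SUCCESS)
                    prerror("EXAMPLE: PHB registration failed\n");
    }
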
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
index c1af4b3..2724aab 100644
--- a/core/test/Makefile.check
+++ b/core/test/Makefile.check
@@ -14,6 +14,7 @@ CORE_TEST := core/test/run-device \
core/test/run-pel \
core/test/run-pool \
core/test/run-time-utils \
+ core/test/run-timebase \
core/test/run-timer
CORE_TEST_NOSTUB := core/test/run-console-log
diff --git a/core/test/run-timebase.c b/core/test/run-timebase.c
new file mode 100644
index 0000000..2d4c83d
--- /dev/null
+++ b/core/test/run-timebase.c
@@ -0,0 +1,56 @@
+/* Copyright 2013-2015 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+#define __TEST__
+#include <timebase.h>
+
+int main(void)
+{
+ /* This is a fairly solid assumption that the math we're doing
+ * is based on tb_hz of exactly 512mhz.
+ * If we do start doing the math on different tb_hz, you probably
+ * want to go and audit every bit of code that touches tb to
+ * count/delay things.
+ */
+ assert(tb_hz == 512000000);
+ assert(secs_to_tb(1) == tb_hz);
+ assert(secs_to_tb(2) == 1024000000);
+ assert(secs_to_tb(10) == 5120000000);
+ assert(tb_to_secs(512000000) == 1);
+ assert(tb_to_secs(5120000000) == 10);
+ assert(tb_to_secs(1024000000) == 2);
+
+ assert(msecs_to_tb(1) == 512000);
+ assert(msecs_to_tb(100) == 51200000);
+ assert(msecs_to_tb(5) == 2560000);
+ assert(tb_to_msecs(512000) == 1);
+
+ assert(usecs_to_tb(5) == 2560);
+ assert(tb_to_usecs(2560) == 5);
+ assert(usecs_to_tb(5)*1000 == msecs_to_tb(5));
+ assert(tb_to_usecs(512000) == 1000);
+
+ assert(tb_compare(msecs_to_tb(5), usecs_to_tb(5)) == TB_AAFTERB);
+ assert(tb_compare(msecs_to_tb(5), usecs_to_tb(50000)) == TB_ABEFOREB);
+ assert(tb_compare(msecs_to_tb(5), usecs_to_tb(5)*1000) == TB_AEQUALB);
+
+ return 0;
+}
diff --git a/core/test/run-trace.c b/core/test/run-trace.c
index fc08512..fa8a30b 100644
--- a/core/test/run-trace.c
+++ b/core/test/run-trace.c
@@ -165,6 +165,7 @@ static bool all_done(const bool done[])
static void test_parallel(void)
{
void *p;
+ unsigned int cpu;
unsigned int i, counts[CPUS] = { 0 }, overflows[CPUS] = { 0 };
unsigned int repeats[CPUS] = { 0 }, num_overflows[CPUS] = { 0 };
bool done[CPUS] = { false };
@@ -224,9 +225,13 @@ static void test_parallel(void)
assert(be16_to_cpu(t.repeat.num) <= be16_to_cpu(t.hdr.cpu));
repeats[be16_to_cpu(t.hdr.cpu)] += be16_to_cpu(t.repeat.num);
} else if (t.hdr.type == 0x70) {
- done[be16_to_cpu(t.hdr.cpu)] = true;
+ cpu = be16_to_cpu(t.hdr.cpu);
+ assert(cpu < CPUS);
+ done[cpu] = true;
} else {
- counts[be16_to_cpu(t.hdr.cpu)]++;
+ cpu = be16_to_cpu(t.hdr.cpu);
+ assert(cpu < CPUS);
+ counts[cpu]++;
}
}
diff --git a/core/trace.c b/core/trace.c
index 15e505a..10b70bd 100644
--- a/core/trace.c
+++ b/core/trace.c
@@ -40,8 +40,8 @@ static struct {
void init_boot_tracebuf(struct cpu_thread *boot_cpu)
{
init_lock(&boot_tracebuf.trace_info.lock);
- boot_tracebuf.trace_info.tb.mask = BOOT_TBUF_SZ - 1;
- boot_tracebuf.trace_info.tb.max_size = MAX_SIZE;
+ boot_tracebuf.trace_info.tb.mask = cpu_to_be64(BOOT_TBUF_SZ - 1);
+ boot_tracebuf.trace_info.tb.max_size = cpu_to_be32(MAX_SIZE);
boot_cpu->trace = &boot_tracebuf.trace_info;
}
@@ -103,7 +103,7 @@ static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
rpt->type = TRACE_REPEAT;
rpt->len_div_8 = sizeof(*rpt) >> 3;
rpt->cpu = trace->hdr.cpu;
- rpt->prev_len = trace->hdr.len_div_8 << 3;
+ rpt->prev_len = cpu_to_be16(trace->hdr.len_div_8 << 3);
rpt->num = cpu_to_be16(1);
lwsync(); /* write barrier: complete repeat record before exposing */
tb->end = cpu_to_be64(be64_to_cpu(tb->end) + sizeof(*rpt));
diff --git a/core/utils.c b/core/utils.c
index 5d86ed0..4bb89df 100644
--- a/core/utils.c
+++ b/core/utils.c
@@ -22,7 +22,8 @@
#include <cpu.h>
#include <stack.h>
-unsigned long __stack_chk_guard = 0xdeadf00dbaad300d;
+extern unsigned long __stack_chk_guard;
+unsigned long __stack_chk_guard = 0xdeadf00dbaad300dULL;
void __noreturn assert_fail(const char *msg)
{
diff --git a/coverity-model.c b/coverity-model.c
new file mode 100644
index 0000000..b11f3bf
--- /dev/null
+++ b/coverity-model.c
@@ -0,0 +1,23 @@
+void __attribute__((noreturn)) _abort(void) {
+ __coverity_panic__();
+}
+
+void *__memalign(size_t blocksize, size_t bytes, const char *location) {
+ __coverity_alloc__(bytes);
+}
+
+void mem_free(struct mem_region *region, void *mem, const char *location) {
+ __coverity_free__(mem);
+}
+
+void lock(struct lock *l) {
+ __coverity_exclusive_lock_acquire__(l);
+}
+
+void unlock(struct lock *l) {
+ __coverity_exclusive_lock_release__(l);
+}
+
+static inline void cpu_relax(void) {
+ __coverity_sleep__();
+}
diff --git a/doc/device-tree/nvlink.txt b/doc/device-tree/nvlink.txt
new file mode 100644
index 0000000..70e9545
--- /dev/null
+++ b/doc/device-tree/nvlink.txt
@@ -0,0 +1,137 @@
+===========================
+Nvlink Device Tree Bindings
+===========================
+
+See doc/nvlink.txt for general Nvlink information.
+
+NPU bindings:
+
+xscom@3fc0000000000 {
+ npu@8013c00 {
+ reg = <0x8013c00 0x2c>;
+ compatible = "ibm,power8-npu";
+ ibm,npu-index = <0x0>;
+ ibm,npu-links = <0x4>;
+
+; Number of links wired up to this npu.
+
+ phandle = <0x100002bc>;
+ linux,phandle = <0x100002bc>;
+
+ link@0 {
+ ibm,npu-pbcq = <0x1000000b>;
+
+; phandle to the pbcq which connects to the GPU.
+
+ ibm,npu-phy = <0x80000000 0x8010c3f>;
+
+; SCOM address of the IBM PHY controlling this link.
+
+ compatible = "ibm,npu-link";
+ ibm,npu-lane-mask = <0xff>;
+
+; Mask specifying which IBM PHY lanes are used for this link.
+
+ phandle = <0x100002bd>;
+ ibm,npu-link-index = <0x0>;
+
+; Hardware link index. Naples systems contain links at index 0,1,4 & 5.
+; Used to calculate various address offsets.
+
+ linux,phandle = <0x100002bd>;
+ };
+
+ link@1 {
+ ibm,npu-pbcq = <0x1000000b>;
+ ibm,npu-phy = <0x80000000 0x8010c3f>;
+ compatible = "ibm,npu-link";
+ ibm,npu-lane-mask = <0xff00>;
+ phandle = <0x100002be>;
+ ibm,npu-link-index = <0x1>;
+ linux,phandle = <0x100002be>;
+ };
+
+ link@4 {
+ ibm,npu-pbcq = <0x1000000a>;
+ ibm,npu-phy = <0x80000000 0x8010c7f>;
+ compatible = "ibm,npu-link";
+ ibm,npu-lane-mask = <0xff00>;
+ phandle = <0x100002bf>;
+ ibm,npu-link-index = <0x4>;
+ linux,phandle = <0x100002bf>;
+ };
+
+ link@5 {
+ ibm,npu-pbcq = <0x1000000a>;
+ ibm,npu-phy = <0x80000000 0x8010c7f>;
+ compatible = "ibm,npu-link";
+ ibm,npu-lane-mask = <0xff>;
+ phandle = <0x100002c0>;
+ ibm,npu-link-index = <0x5>;
+ linux,phandle = <0x100002c0>;
+ };
+ };
+};
+
+Emulated PCI device bindings:
+
+ pciex@3fff000400000 {
+ ibm,npcq = <0x100002bc>;
+
+; phandle to the NPU node. Used to find associated PCI GPU devices.
+
+ compatible = "ibm,power8-npu-pciex", "ibm,ioda2-npu-phb";
+
+ pci@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ revision-id = <0x0>;
+ interrupts = <0x1>;
+ device-id = <0x4ea>;
+ ibm,pci-config-space-type = <0x1>;
+ vendor-id = <0x1014>;
+ ibm,gpu = <0x100002f7>;
+
+; phandle pointing the associated GPU PCI device node
+
+ phandle = <0x100002fc>;
+ };
+
+ pci@1 {
+ reg = <0x800 0x0 0x0 0x0 0x0>;
+ revision-id = <0x0>;
+ interrupts = <0x1>;
+ device-id = <0x4ea>;
+ ibm,pci-config-space-type = <0x1>;
+ vendor-id = <0x1014>;
+ ibm,gpu = <0x100002f5>;
+ phandle = <0x100002fe>;
+ class-code = <0x60400>;
+ linux,phandle = <0x100002fe>;
+ };
+
+ pci@0,1 {
+ reg = <0x100 0x0 0x0 0x0 0x0>;
+ revision-id = <0x0>;
+ interrupts = <0x2>;
+ device-id = <0x4ea>;
+ ibm,pci-config-space-type = <0x1>;
+ vendor-id = <0x1014>;
+ ibm,gpu = <0x100002f7>;
+ phandle = <0x100002fd>;
+ class-code = <0x60400>;
+ linux,phandle = <0x100002fd>;
+ };
+
+ pci@1,1 {
+ reg = <0x900 0x0 0x0 0x0 0x0>;
+ revision-id = <0x0>;
+ interrupts = <0x2>;
+ device-id = <0x4ea>;
+ ibm,pci-config-space-type = <0x1>;
+ vendor-id = <0x1014>;
+ ibm,gpu = <0x100002f5>;
+ phandle = <0x100002ff>;
+ class-code = <0x60400>;
+ linux,phandle = <0x100002ff>;
+ };
+ };
diff --git a/doc/nvlink.txt b/doc/nvlink.txt
new file mode 100644
index 0000000..d871d20
--- /dev/null
+++ b/doc/nvlink.txt
@@ -0,0 +1,157 @@
+OPAL/Skiboot Nvlink Interface Documentation
+----------------------------------------------------------------------
+
+========
+Overview
+========
+
+NV-Link is a high speed interconnect that is used in conjunction with
+a PCI-E connection to create an interface between chips that provides
+very high data bandwidth. The PCI-E connection is used as the control
+path to initiate and report status of large data transfers. The data
+transfers themselves are sent over the NV-Link.
+
+On IBM Power systems the NV-Link hardware is similar to our standard
+PCI hardware so to maximise code reuse the NV-Link is exposed as an
+emulated PCI device through system firmware (OPAL/skiboot). Thus each
+NV-Link capable device will appear as two devices on a system, the
+real PCI-E device and at least one emulated PCI device used for the
+NV-Link.
+
+Presently the NV-Link is only capable of data transfers initiated by
+the target, thus the emulated PCI device will only handle registers
+for link initialisation, DMA transfers and error reporting (EEH).
+
+====================
+Emulated PCI Devices
+====================
+
+Each link will be exported as an emulated PCI device with a minimum of
+two emulated PCI devices per GPU. Emulated PCI devices are grouped per
+GPU.
+
+The emulated PCI device will be exported as a standard PCI device by
+the Linux kernel. It has a standard PCI configuration space to expose
+necessary device parameters. The only functionality available is
+related to the setup of DMA windows.
+
+Configuration Space Parameters
+-----------------------------
+
+Vendor ID = 0x1014 (IBM)
+Device ID = 0x04ea
+Revision ID = 0x00
+Class = 0x068000 (Bridge Device Other, ProgIf = 0x0)
+BAR0/1 = TL/DL Registers
+
+TL/DL Registers
+---------------
+
+Each link has 128KB of TL/DL registers. These will always be mapped
+to 64-bit BAR#0 of the emulated PCI device configuration space.
+
+BAR#0 + 128K +-----------+
+ | NTL (64K) |
+BAR#0 + 64K +-----------+
+ | DL (64K) |
+BAR#0 +-----------+
+
+Vendor Specific Capabilities
+----------------------------
+
++-----------------+----------------+----------------+----------------+
+| Version (0x02) | Cap Length | Next Cap Ptr | Cap ID (0x09) |
++-----------------+----------------+----------------+----------------+
+| Procedure Status Register |
++--------------------------------------------------------------------+
+| Procedure Control Register |
++---------------------------------------------------+----------------+
+| Reserved | PCI Dev Flag | Link Number |
++---------------------------------------------------+----------------+
+
+Version
+
+ This refers to the version of the NPU config space. Used by device
+ drivers to determine which fields of the config space they can
+ expect to be available.
+
+Procedure Control Register
+
+ Used to start hardware procedures.
+
+ Writes will start the corresponding procedure and set bit 31 in the
+ procedure status register. This register must not be written while
+ bit 31 is set in the status register. Performing a write while
+ another procedure is already in progress will abort that procedure.
+
+ Reads will return the in progress procedure or the last completed
+ procedure number depending on the procedure status field.
+
+ Procedure Numbers:
+ 0 - Abort in-progress procedure
+ 1 - NOP
+ 2 - Unsupported procedure
+ 3 - Unsupported procedure
+ 4 - Naples PHY - RESET
+ 5 - Naples PHY - TX_ZCAL
+ 6 - Naples PHY - RX_DCCAL
+ 7 - Naples PHY - TX_RXCAL_ENABLE
+ 8 - Naples PHY - TX_RXCAL_DISABLE
+ 9 - Naples PHY - RX_TRAINING
+ 10 - Naples NPU - RESET
+ 11 - Naples PHY - PHY preterminate
+ 12 - Naples PHY - PHY terminated
+
+ Procedure 5 (TX_ZCAL) should only be run once. System firmware will
+ ensure this so device drivers may call this procedure multiple
+ times.
+
+Procedure Status Register
+
+ The procedure status register is used to determine when execution
+ of the procedure number in the control register is complete and if
+ it completed successfully.
+
+ This register must be polled frequently to allow system firmware to
+ execute the procedures.
+
+ Fields:
+ Bit 31 - Procedure in progress
+ Bit 30 - Procedure complete
+ Bit 3-0 - Procedure completion code
+
+ Procedure completion codes:
+ 0 - Procedure completed successfully.
+ 1 - Transient failure. Procedure should be rerun.
+ 2 - Permanent failure. Procedure will never complete successfully.
+ 3 - Procedure aborted.
+ 4 - Unsupported procedure.
+
+PCI Device Flag
+
+ Bit 0 is set only if an actual PCI device was bound to this
+ emulated device.
+
+Link Number
+
+ Physical link number this emulated PCI device is associated
+ with. One of 0, 1, 4 or 5 (links 2 & 3 do not exist on Naples).
+
+Reserved
+
+ These fields must be ignored and no value should be assumed.
+
+Interrupts
+----------
+
+Each link has a single DL/TL interrupt assigned to it. These will be
+exposed as an LSI via the emulated PCI device. There are 4 links
+consuming 4 LSI interrupts. The 4 remaining interrupts supported by the
+corresponding PHB will be routed to OS platform for the purpose of error
+reporting.
+
+====================
+Device Tree Bindings
+====================
+
+See doc/device-tree/nvlink.txt
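
To make the procedure protocol above concrete, here is a rough driver-side
sketch (illustration only, not part of this patch: the function name and the
register-offset macros are assumptions based on the capability layout above,
and the standard Linux PCI config-space accessors are used):

    #include <linux/pci.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    #define NPU_PROC_STATUS     0x04        /* Procedure Status Register  */
    #define NPU_PROC_CONTROL    0x08        /* Procedure Control Register */
    #define NPU_PROC_INPROGRESS (1u << 31)
    #define NPU_PROC_CODE_MASK  0xfu

    /* Run one hardware procedure; returns the 4-bit completion code
     * (0 == success) or a negative errno. */
    static int npu_run_procedure(struct pci_dev *pdev, u32 number)
    {
            int cap = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
            u32 status;

            if (!cap)
                    return -ENODEV;

            /* The control register must not be written while bit 31 is set. */
            pci_read_config_dword(pdev, cap + NPU_PROC_STATUS, &status);
            if (status & NPU_PROC_INPROGRESS)
                    return -EBUSY;

            /* A write to the control register starts the procedure... */
            pci_write_config_dword(pdev, cap + NPU_PROC_CONTROL, number);

            /* ...and frequent polling of the status register lets system
             * firmware execute it. */
            do {
                    udelay(100);
                    pci_read_config_dword(pdev, cap + NPU_PROC_STATUS, &status);
            } while (status & NPU_PROC_INPROGRESS);

            return status & NPU_PROC_CODE_MASK;
    }
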
diff --git a/doc/stable-skiboot-rules.txt b/doc/stable-skiboot-rules.txt
new file mode 100644
index 0000000..8ad7e6f
--- /dev/null
+++ b/doc/stable-skiboot-rules.txt
@@ -0,0 +1,62 @@
+Stable Skiboot tree/releases
+----------------------------
+
+If you're at all familiar with the Linux kernel stable trees, this should
+seem fairly familiar.
+
+The purpose of a -stable tree is to give vendors a stable base to create
+firmware releases from and to incorporate into service packs. New stable
+releases contain critical fixes only.
+
+As a general rule, only the most recent skiboot release gets a maintained
+-stable tree. If you wish to maintain an older tree, speak up! For example,
+with my IBMer hat on, we'll maintain branches that we ship in products.
+
+What patches are accepted?
+--------------------------
+
+- Patches must be obviously correct and tested
+ - A Tested-by signoff is *important*
+- A patch must fix a real bug
+- No trivial patches; such fixups belong in the main branch
+- Not fix a purely theoretical problem unless you can prove how
+ it's exploitable
+- The patch, or an equivalent one, must already be in master
+ - Submitting to both at the same time is okay, but backporting is better
+
+HOWTO submit to stable
+----------------------
+Two ways:
+1) Send patch to the skiboot@ list with "[PATCH stable]" in subject
+ - This targets the patch *ONLY* to the stable branch.
+ - Such commits will *NOT* be merged into master.
+ - Use this when:
+ a) cherry-picking a fix from master
+ b) fixing something that is only broken in stable
+ c) fix in stable needs to be completely different than in master
+ If b or c: explain why.
+ - If cherry-picking, include the following at the top of your
+ commit message:
+ commit <sha1> upstream.
+ - If the patch has been modified, explain why in description.
+
+2) Add "Cc: stable" above your Signed-off-by line when sending to skiboot@
+ - This targets the patch to master and stable.
+ - You can target a patch to a specific stable tree with:
+ Cc: stable # 5.1.x
+ and that will target it to the 5.1.x branch.
+ - You can ask for prerequisites to be cherry-picked:
+ Cc: stable # 5.1.x 55ae15b Ensure we run pollers in cpu_wait_job()
+ Cc: stable # 5.1.x
+ Which means:
+ 1) please git cherry-pick 55ae15b
+ 2) then apply this patch to 5.1.x.
+
+Trees
+-----
+- https://github.com/open-power/skiboot/tree/stable
+ git@github.com:open-power/skiboot.git (stable branch)
+
+- Some stable versions may last longer than others
+ - So there may be stable, stable-5.1 and stable-5.0 at any one time being
+ actively maintained.
diff --git a/external/common/arch_flash.h b/external/common/arch_flash.h
index 60c4de8..918ffa9 100644
--- a/external/common/arch_flash.h
+++ b/external/common/arch_flash.h
@@ -20,7 +20,8 @@
#include <getopt.h>
#include <libflash/blocklevel.h>
-int arch_flash_init(struct blocklevel_device **bl, const char *file);
+int arch_flash_init(struct blocklevel_device **bl, const char *file,
+ bool keep_alive);
void arch_flash_close(struct blocklevel_device *bl, const char *file);
diff --git a/external/common/arch_flash_arm.c b/external/common/arch_flash_arm.c
index b3c9454..f65bddc 100644
--- a/external/common/arch_flash_arm.c
+++ b/external/common/arch_flash_arm.c
@@ -32,6 +32,7 @@
#include <libflash/file.h>
#include "ast.h"
#include "arch_flash.h"
+#include "arch_flash_arm_io.h"
struct flash_chip;
@@ -262,7 +263,7 @@ int arch_flash_set_wrprotect(struct blocklevel_device *bl, int set)
return set_wrprotect(set);
}
-int arch_flash_init(struct blocklevel_device **r_bl, const char *file)
+int arch_flash_init(struct blocklevel_device **r_bl, const char *file, bool keep_alive)
{
struct blocklevel_device *new_bl;
@@ -271,7 +272,7 @@ int arch_flash_init(struct blocklevel_device **r_bl, const char *file)
return -1;
if (file) {
- file_init_path(file, NULL, &new_bl);
+ file_init_path(file, NULL, keep_alive, &new_bl);
} else {
new_bl = flash_setup(arch_data.bmc);
}
diff --git a/external/common/arch_flash_powerpc.c b/external/common/arch_flash_powerpc.c
index a647219..19dfec8 100644
--- a/external/common/arch_flash_powerpc.c
+++ b/external/common/arch_flash_powerpc.c
@@ -188,11 +188,11 @@ static int get_dev_mtd(const char *fdt_flash_path, char **mtd_path)
return done ? rc : -1;
}
-static struct blocklevel_device *arch_init_blocklevel(const char *file)
+static struct blocklevel_device *arch_init_blocklevel(const char *file, bool keep_alive)
{
int rc;
struct blocklevel_device *new_bl = NULL;
- char *real_file;
+ char *real_file = NULL;
if (!file) {
rc = get_dev_mtd(FDT_FLASH_PATH, &real_file);
@@ -200,7 +200,8 @@ static struct blocklevel_device *arch_init_blocklevel(const char *file)
return NULL;
}
- file_init_path(file ? file : real_file, NULL, &new_bl);
+ file_init_path(file ? file : real_file, NULL, keep_alive, &new_bl);
+ free(real_file);
return new_bl;
}
@@ -210,11 +211,11 @@ int arch_flash_set_wrprotect(struct blocklevel_device *bl, int set)
return 0;
}
-int arch_flash_init(struct blocklevel_device **r_bl, const char *file)
+int arch_flash_init(struct blocklevel_device **r_bl, const char *file, bool keep_alive)
{
struct blocklevel_device *new_bl;
- new_bl = arch_init_blocklevel(file);
+ new_bl = arch_init_blocklevel(file, keep_alive);
if (!new_bl)
return -1;
diff --git a/external/common/arch_flash_x86.c b/external/common/arch_flash_x86.c
index 29c0229..3be05df 100644
--- a/external/common/arch_flash_x86.c
+++ b/external/common/arch_flash_x86.c
@@ -28,7 +28,7 @@
#include "arch_flash.h"
-int arch_flash_init(struct blocklevel_device **r_bl, const char *file)
+int arch_flash_init(struct blocklevel_device **r_bl, const char *file, bool keep_alive)
{
struct blocklevel_device *new_bl;
@@ -38,7 +38,7 @@ int arch_flash_init(struct blocklevel_device **r_bl, const char *file)
return -1;
}
- file_init_path(file, NULL, &new_bl);
+ file_init_path(file, NULL, keep_alive, &new_bl);
if (!new_bl)
return -1;
diff --git a/external/common/get_arch.sh b/external/common/get_arch.sh
index 18a5cef..f4beb1d 100755
--- a/external/common/get_arch.sh
+++ b/external/common/get_arch.sh
@@ -7,5 +7,5 @@ echo -n ARCH_X86
echo -n ARCH_ARM
#else
echo -n ARCH_UNKNOWN
-#endif" | $1cpp | sh
+#endif" | $1cpp | /bin/sh
diff --git a/external/common/rules.mk b/external/common/rules.mk
index bc565e8..ec20593 100644
--- a/external/common/rules.mk
+++ b/external/common/rules.mk
@@ -1,36 +1,49 @@
-ARCH = $(shell $(GET_ARCH) "$(CROSS_COMPILE)")
+ARCH := $(shell $(GET_ARCH) "$(CROSS_COMPILE)")
ifeq ($(ARCH),ARCH_ARM)
-arch = arm
-ARCH_OBJS = common/arch_flash_common.o common/arch_flash_arm.o common/ast-sf-ctrl.o
+arch := arm
+ARCH_FILES := arch_flash_common.c arch_flash_arm.c ast-sf-ctrl.c
else
ifeq ($(ARCH),ARCH_POWERPC)
-arch = powerpc
-ARCH_OBJS = common/arch_flash_common.o common/arch_flash_powerpc.o
+arch := powerpc
+ARCH_FILES := arch_flash_common.c arch_flash_powerpc.c
else
ifeq ($(ARCH),ARCH_X86)
-arch = x86
-ARCH_OBJS = common/arch_flash_common.o common/arch_flash_x86.o
+arch := x86
+ARCH_FILES := arch_flash_common.c arch_flash_x86.c
else
$(error Unsupported architecture $(ARCH))
endif
endif
endif
-.PHONY: arch_links
-arch_links:
- ln -sf ../../hw/ast-bmc/ast-sf-ctrl.c common/ast-sf-ctrl.c
- ln -sf arch_flash_$(arch)_io.h common/io.h
+ARCH_SRC := $(addprefix common/,$(ARCH_FILES))
+ARCH_OBJS := $(addprefix common-,$(ARCH_FILES:.c=.o))
+
+# Arch links are like this so we can have dependencies work (so that we don't
+# run the rule when the links exist), pretty build output (knowing the target
+# name) and a list of the files so we can clean them up.
+ARCH_LINKS := common/ast-sf-ctrl.c common/ast.h common/io.h
+
+arch_links: $(ARCH_LINKS)
+common/ast.h : ../../include/ast.h | common
+ $(Q_LN)ln -sf ../../include/ast.h common/ast.h
+
+common/io.h : ../common/arch_flash_$(arch)_io.h | common
+ $(Q_LN)ln -sf arch_flash_$(arch)_io.h common/io.h
+
+common/ast-sf-ctrl.c : ../../hw/ast-bmc/ast-sf-ctrl.c | common
+ $(Q_LN)ln -sf ../../hw/ast-bmc/ast-sf-ctrl.c common/ast-sf-ctrl.c
.PHONY: arch_clean
arch_clean:
- rm -rf $(ARCH_OBJS)
+ rm -rf $(ARCH_OBJS) $(ARCH_LINKS)
+
+$(ARCH_SRC): | common
-#If arch changes make won't realise it needs to rebuild...
-.PHONY: .FORCE
-common/arch_flash_common.o: common/arch_flash_common.c .FORCE
- $(CROSS_COMPILE)gcc $(CFLAGS) -c $< -o $@
+$(ARCH_OBJS): common-%.o: common/%.c
+ $(Q_CC)$(CROSS_COMPILE)gcc $(CFLAGS) $(CPPFLAGS) -c $< -o $@
-common/arch_flash.o: $(ARCH_OBJS)
- $(CROSS_COMPILE)ld -r $(ARCH_OBJS) -o $@
+common-arch_flash.o: $(ARCH_OBJS)
+ $(Q_LD)$(CROSS_COMPILE)ld -r $(ARCH_OBJS) -o $@
diff --git a/external/gard/.gitignore b/external/gard/.gitignore
index df7d675..7c81877 100644
--- a/external/gard/.gitignore
+++ b/external/gard/.gitignore
@@ -1,3 +1,10 @@
-*.o
+o
*.d
+ccan
+common
+common-ast-sf-ctrl.c
+common-io.h
gard
+test/test.sh
+libflash
+make_version.sh
diff --git a/external/gard/Makefile b/external/gard/Makefile
index f43be43..be3d1ba 100644
--- a/external/gard/Makefile
+++ b/external/gard/Makefile
@@ -1,37 +1,54 @@
# This tool is a linux userland tool and should be completely stand alone
+include rules.mk
+GET_ARCH = ../../external/common/get_arch.sh
+include ../../external/common/rules.mk
-prefix = /usr/local/
-sbindir = $(prefix)/sbin
-datadir = $(prefix)/share
-mandir = $(datadir)/man
+all: $(EXE)
-CC = $(CROSS_COMPILE)gcc
-CFLAGS += -m64 -Werror -Wall -g2 -ggdb
-LDFLAGS += -m64
-ASFLAGS = -m64
-CPPFLAGS += -I. -I../../
+.PHONY: links
+links: libflash ccan common make_version.sh
-OBJS = file.o gard.o libflash.o libffs.o ecc.o blocklevel.o
+libflash:
+ ln -sf ../../libflash .
-EXE = gard
+ccan:
+ ln -sf ../../ccan .
-all: $(EXE)
+common:
+ ln -sf ../common .
+
+make_version.sh:
+ ln -sf ../../make_version.sh
-%.o: %.c
- $(COMPILE.c) $< -o $@
+#Rebuild version.o so that the version always matches
+#what the test suite will get from ./make_version.sh
+check: version.o all
+ @ln -sf ../../test/test.sh test/test.sh
+ @test/test-gard
-%.o: ../../libflash/%.c
- $(COMPILE.c) $< -o $@
+$(OBJS): | links arch_links
-$(EXE): $(OBJS)
- $(LINK.o) -o $@ $^
+.PHONY: VERSION-always
+.version: VERSION-always
+ @echo $(GARD_VERSION) > $@.tmp
+ @cmp -s $@ $@.tmp || cp $@.tmp $@
+ @rm -f $@.tmp
-install: all
- install -D gard $(DESTDIR)$(sbindir)/opal-gard
- install -D -m 0644 opal-gard.1 $(DESTDIR)$(mandir)/man1/opal-gard.1
+.PHONY: dist
+#File is named $(PFLASH_VERSION).tar because the expectation is that pflash-
+#is always at the start of the version. This remains consistent with skiboot
+#version strings
+dist: links .version
+ find -L ../pflash/ -iname '*.[ch]' -print0 | xargs -0 tar -rhf $(PFLASH_VERSION).tar
+ tar --transform 's/Makefile.dist/Makefile/' -rhf $(PFLASH_VERSION).tar \
+ ../pflash/Makefile.dist ../pflash/rules.mk \
+ ../pflash/.version ../pflash/make_version.sh \
+ ../pflash/common/*
-clean:
- rm -f $(OBJS) $(EXE) *.d
+clean: arch_clean
+ rm -f $(OBJS) $(EXE) *.o *.d .version .version.tmp
distclean: clean
+ rm -f *.c~ *.h~ *.sh~ Makefile~ config.mk~ libflash/*.c~ libflash/*.h~
+ rm -f libflash ccan common io.h make_version.sh
diff --git a/external/gard/Makefile.dist b/external/gard/Makefile.dist
new file mode 100644
index 0000000..46edcb1
--- /dev/null
+++ b/external/gard/Makefile.dist
@@ -0,0 +1,10 @@
+include rules.mk
+GET_ARCH = common/get_arch.sh
+include common/rules.mk
+
+all: $(EXE)
+
+clean:
+ rm -f $(OBJS) *.o
+distclean: clean
+ rm -f $(EXE)
diff --git a/external/gard/gard.c b/external/gard/gard.c
index 869ada0..2163707 100644
--- a/external/gard/gard.c
+++ b/external/gard/gard.c
@@ -37,6 +37,7 @@
#include <libflash/file.h>
#include <libflash/ecc.h>
#include <libflash/blocklevel.h>
+#include <common/arch_flash.h>
#include "gard.h"
@@ -46,6 +47,9 @@
#define SYSFS_MTD_PATH "/sys/class/mtd/"
#define FLASH_GARD_PART "GUARD"
+/* Full gard version number (possibly includes gitid). */
+extern const char version[];
+
struct gard_ctx {
bool ecc;
uint32_t f_size;
@@ -83,7 +87,7 @@ static void show_flash_err(int rc)
case FFS_ERR_PART_NOT_FOUND:
fprintf(stderr, "libffs flash partition not found\n");
break;
- /* ------- */
+ /* ------- */
case FLASH_ERR_MALLOC_FAILED:
fprintf(stderr, "libflash malloc failed\n");
break;
@@ -224,152 +228,15 @@ static const char *path_type_to_str(enum path_type t)
return "Unknown";
}
-static bool get_dev_attr(const char *dev, const char *attr_file, uint32_t *attr)
-{
- char dev_path[PATH_MAX] = SYSFS_MTD_PATH;
- /*
- * Needs to be large enough to hold at most uint32_t represented as a
- * string in hex with leading 0x
- */
- char attr_buf[10];
- int fd, rc;
-
- /*
- * sizeof(dev_path) - (strlen(dev_path) + 1) is the remaining space in
- * dev_path, + 1 to account for the '\0'. As strncat could write n+1 bytes
- * to dev_path the correct calulcation for n is:
- * (sizeof(dev_path) - (strlen(dev_path) + 1) - 1)
- */
- strncat(dev_path, dev, (sizeof(dev_path) - (strlen(dev_path) + 1) - 1));
- strncat(dev_path, "/", (sizeof(dev_path) - (strlen(dev_path) + 1) - 1));
- strncat(dev_path, attr_file, (sizeof(dev_path) - (strlen(dev_path) + 1) - 1));
- fd = open(dev_path, O_RDONLY);
- if (fd == -1)
- goto out;
-
- rc = read(fd, attr_buf, sizeof(attr_buf));
- close(fd);
- if (rc == -1)
- goto out;
-
- if (attr)
- *attr = strtol(attr_buf, NULL, 0);
-
- return 0;
-
-out:
- fprintf(stderr, "Couldn't get MTD device attribute '%s' from '%s'\n", dev, attr_file);
- return -1;
-}
-
-static int get_dev_mtd(const char *fdt_flash_path, char **r_path)
-{
- struct dirent **namelist;
- char fdt_node_path[PATH_MAX];
- int count, i, rc, fd;
- bool done;
-
- if (!fdt_flash_path)
- return -1;
-
- fd = open(fdt_flash_path, O_RDONLY);
- if (fd == -1) {
- fprintf(stderr, "Couldn't open '%s' FDT attribute to determine which flash device to use\n",
- fdt_flash_path);
- return -1;
- }
-
- rc = read(fd, fdt_node_path, sizeof(fdt_node_path) - 1);
- close(fd);
- if (rc == -1) {
- fprintf(stderr, "Couldn't read flash FDT node from '%s'\n", fdt_flash_path);
- return -1;
- }
- fdt_node_path[rc] = '\0';
-
- count = scandir(SYSFS_MTD_PATH, &namelist, NULL, alphasort);
- if (count == -1) {
- fprintf(stderr, "Couldn't scan '%s' for MTD devices\n", SYSFS_MTD_PATH);
- return -1;
- }
-
- rc = 0;
- done = false;
- for (i = 0; i < count; i++) {
- struct dirent *dirent;
- char dev_path[PATH_MAX] = SYSFS_MTD_PATH;
- char fdt_node_path_tmp[PATH_MAX];
-
- dirent = namelist[i];
- if (dirent->d_name[0] == '.' || rc || done) {
- free(namelist[i]);
- continue;
- }
-
- strncat(dev_path, dirent->d_name, sizeof(dev_path) - strlen(dev_path) - 2);
- strncat(dev_path, "/device/of_node", sizeof(dev_path) - strlen(dev_path) - 2);
-
- rc = readlink(dev_path, fdt_node_path_tmp, sizeof(fdt_node_path_tmp) - 1);
- if (rc == -1) {
- /*
- * This might fail because it could not exist if the system has flash
- * devices that present as mtd but don't have corresponding FDT
- * nodes, just continue silently.
- */
- free(namelist[i]);
- /* Should still try the next dir so reset rc */
- rc = 0;
- continue;
- }
- fdt_node_path_tmp[rc] = '\0';
-
- if (strstr(fdt_node_path_tmp, fdt_node_path)) {
- uint32_t flags, size;
-
- /*
- * size and flags could perhaps have be gotten another way but this
- * method is super unlikely to fail so it will do.
- */
-
- /* Check to see if device is writeable */
- rc = get_dev_attr(dirent->d_name, "flags", &flags);
- if (rc) {
- free(namelist[i]);
- continue;
- }
-
- /* Get the size of the mtd device while we're at it */
- rc = get_dev_attr(dirent->d_name, "size", &size);
- if (rc) {
- free(namelist[i]);
- continue;
- }
-
- strcpy(dev_path, "/dev/");
- strncat(dev_path, dirent->d_name, sizeof(dev_path) - strlen(dev_path) - 2);
- *r_path = strdup(dev_path);
- done = true;
- }
- free(namelist[i]);
- }
- free(namelist);
-
- if (!done)
- fprintf(stderr, "Couldn't find '%s' corresponding MTD\n", fdt_flash_path);
-
- /* explicit negative value so as to not return a libflash code */
- return done ? rc : -1;
-}
-
static bool is_valid_id(uint32_t record_id)
{
return record_id != CLEARED_RECORD_ID;
}
static int do_iterate(struct gard_ctx *ctx,
- int (*func)(struct gard_ctx *ctx, int pos,
- struct gard_record *gard, void *priv),
- void *priv)
+ int (*func)(struct gard_ctx *ctx, int pos,
+ struct gard_record *gard, void *priv),
+ void *priv)
{
int rc = 0;
unsigned int i;
@@ -667,10 +534,16 @@ struct {
{ "clear", "Clear GARD records", do_clear },
};
+static void print_version(void)
+{
+ printf("Open-Power GARD tool %s\n", version);
+}
+
static void usage(const char *progname)
{
unsigned int i;
+ print_version();
fprintf(stderr, "Usage: %s [-a -e -f <file> -p] <command> [<args>]\n\n",
progname);
fprintf(stderr, "-e --ecc\n\tForce reading/writing with ECC bytes.\n\n");
@@ -701,7 +574,6 @@ static const char *global_optstring = "+ef:p";
int main(int argc, char **argv)
{
const char *action, *progname;
- const char *fdt_flash_path = FDT_ACTIVE_FLASH_PATH;
char *filename = NULL;
struct gard_ctx _ctx, *ctx;
int rc, i = 0;
@@ -740,7 +612,8 @@ int main(int argc, char **argv)
break;
case '?':
usage(progname);
- return EXIT_FAILURE;
+ rc = EXIT_FAILURE;
+ goto out_free;
}
}
@@ -756,23 +629,21 @@ int main(int argc, char **argv)
/* do we have a command? */
if (optind == argc) {
usage(progname);
- return EXIT_FAILURE;
+ rc = EXIT_FAILURE;
+ goto out_free;
}
argc -= optind;
argv += optind;
action = argv[0];
- if (!filename) {
- rc = get_dev_mtd(fdt_flash_path, &filename);
- if (rc)
- return EXIT_FAILURE;
+ if (arch_flash_init(&(ctx->bl), filename, true)) {
+		/* Can fail in a few ways, most likely couldn't open the MTD device */
+ fprintf(stderr, "Can't open %s\n", filename ? filename : "MTD Device. Are you root?");
+ rc = EXIT_FAILURE;
+ goto out_free;
}
- rc = file_init_path(filename, NULL, &(ctx->bl));
- if (rc)
- return EXIT_FAILURE;
-
rc = blocklevel_get_info(ctx->bl, NULL, &(ctx->f_size), NULL);
if (rc)
goto out;
@@ -816,7 +687,6 @@ int main(int argc, char **argv)
}
out:
- free(filename);
if (ctx->ffs)
ffs_close(ctx->ffs);
@@ -825,7 +695,8 @@ out:
if (i == ARRAY_SIZE(actions)) {
fprintf(stderr, "%s: '%s' isn't a valid command\n", progname, action);
usage(progname);
- return EXIT_FAILURE;
+ rc = EXIT_FAILURE;
+ goto out_free;
}
if (rc > 0) {
@@ -833,5 +704,8 @@ out:
if (filename && rc == FFS_ERR_BAD_MAGIC)
fprintf(stderr, "Maybe you didn't give a full flash image file?\nDid you mean '--part'?\n");
}
+
+out_free:
+ free(filename);
return rc;
}
diff --git a/external/gard/rules.mk b/external/gard/rules.mk
new file mode 100644
index 0000000..f0086a2
--- /dev/null
+++ b/external/gard/rules.mk
@@ -0,0 +1,39 @@
+.DEFAULT_GOAL := all
+
+override CFLAGS += -O2 -Wall -Werror -I.
+OBJS = version.o gard.o
+LIBFLASH_OBJS += libflash-file.o libflash-libflash.o libflash-libffs.o libflash-ecc.o libflash-blocklevel.o
+OBJS += $(LIBFLASH_OBJS)
+OBJS += common-arch_flash.o
+EXE = gard
+
+CC = $(CROSS_COMPILE)gcc
+
+prefix = /usr/local/
+sbindir = $(prefix)/sbin
+datadir = $(prefix)/share
+mandir = $(datadir)/man
+
+GARD_VERSION ?= $(shell ./make_version.sh $(EXE))
+
+version.c: make_version.sh .version
+ @(if [ "a$(GARD_VERSION)" = "a" ]; then \
+ echo "#error You need to set GARD_VERSION environment variable" > $@ ;\
+ else \
+ echo "const char version[] = \"$(GARD_VERSION)\";" ;\
+ fi) > $@
+
+%.o : %.c
+ $(CC) $(CFLAGS) -c $< -o $@
+
+$(LIBFLASH_OBJS): libflash-%.o : libflash/%.c
+ $(CC) $(CFLAGS) -c $< -o $@
+
+$(EXE): $(OBJS)
+ $(CC) $(CFLAGS) $^ -o $@
+
+install: all
+ install -D gard $(DESTDIR)$(sbindir)/opal-gard
+ install -D -m 0644 opal-gard.1 $(DESTDIR)$(mandir)/man1/opal-gard.1
+
+
diff --git a/external/gard/test/Makefile.check b/external/gard/test/Makefile.check
new file mode 100644
index 0000000..ec4993a
--- /dev/null
+++ b/external/gard/test/Makefile.check
@@ -0,0 +1,20 @@
+# -*-Makefile-*-
+
+check: check-gard
+
+#The gard Makefile knows to build before checking, but we should also
+#make clean before checking; otherwise .o files built for a different
+#architecture might be lying around. Clean once done as well to
+#avoid the opposite problem.
+check-gard: gard-test-clean
+ @make CROSS_COMPILE='' -C external/gard/ check
+ @make CROSS_COMPILE='' -C external/gard/ clean
+
+.PHONY: check-gard
+
+clean: gard-test-clean
+
+gard-test-clean:
+ @make -C external/gard clean
+
+.PHONY: gard-test-clean
diff --git a/external/gard/test/files/data1.bin b/external/gard/test/files/data1.bin
new file mode 100644
index 0000000..aa5bf14
--- /dev/null
+++ b/external/gard/test/files/data1.bin
Binary files differ
diff --git a/external/gard/test/make-check-test b/external/gard/test/make-check-test
new file mode 100755
index 0000000..6b9e5db
--- /dev/null
+++ b/external/gard/test/make-check-test
@@ -0,0 +1 @@
+make -C external/gard/ check
diff --git a/external/gard/test/results/00-list.err b/external/gard/test/results/00-list.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/external/gard/test/results/00-list.err
diff --git a/external/gard/test/results/00-list.out b/external/gard/test/results/00-list.out
new file mode 100644
index 0000000..3fd3344
--- /dev/null
+++ b/external/gard/test/results/00-list.out
@@ -0,0 +1,5 @@
+| ID | Error | Type |
++---------------------------------------+
+| 00000001 | 90000015 | physical |
+| 00000002 | 90000016 | physical |
++=======================================+
diff --git a/external/gard/test/results/01-show_1.err b/external/gard/test/results/01-show_1.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/external/gard/test/results/01-show_1.err
diff --git a/external/gard/test/results/01-show_1.out b/external/gard/test/results/01-show_1.out
new file mode 100644
index 0000000..4e5726b
--- /dev/null
+++ b/external/gard/test/results/01-show_1.out
@@ -0,0 +1,9 @@
+Record ID: 0x00000001
+========================
+Error ID: 0x90000015
+Error Type: 0xe6
+Res Recovery: 0xff
+Path Type: physical
+>System, Instance #0
+ >Node, Instance #0
+ >Processor, Instance #0
diff --git a/external/gard/test/results/02-usage.err b/external/gard/test/results/02-usage.err
new file mode 100644
index 0000000..2aef39d
--- /dev/null
+++ b/external/gard/test/results/02-usage.err
@@ -0,0 +1,18 @@
+Usage: ./gard [-a -e -f <file> -p] <command> [<args>]
+
+-e --ecc
+ Force reading/writing with ECC bytes.
+
+-f --file <file>
+ Don't search for MTD device, read from <file>.
+
+-p --part
+          Used in conjunction with -f to specify that just
+ the GUARD partition is in <file> and libffs
+ shouldn't be used.
+
+Where <command> is one of:
+
+ list List current GARD records
+ show Show details of a GARD record
+ clear Clear GARD records
diff --git a/external/gard/test/results/02-usage.out b/external/gard/test/results/02-usage.out
new file mode 100644
index 0000000..c862167
--- /dev/null
+++ b/external/gard/test/results/02-usage.out
@@ -0,0 +1 @@
+Open-Power GARD tool VERSION
diff --git a/external/gard/test/test-gard b/external/gard/test/test-gard
new file mode 100755
index 0000000..10da351
--- /dev/null
+++ b/external/gard/test/test-gard
@@ -0,0 +1,5 @@
+#! /bin/sh
+
+. test/test.sh
+
+run_tests "test/tests/*" "test/results"
diff --git a/external/gard/test/tests/00-list b/external/gard/test/tests/00-list
new file mode 100644
index 0000000..8e285c4
--- /dev/null
+++ b/external/gard/test/tests/00-list
@@ -0,0 +1,10 @@
+#! /bin/sh
+
+run_binary "./gard" "-p -e -f test/files/data1.bin list"
+if [ "$?" -ne 0 ] ; then
+ fail_test
+fi
+
+diff_with_result
+
+pass_test
diff --git a/external/gard/test/tests/01-show_1 b/external/gard/test/tests/01-show_1
new file mode 100644
index 0000000..66ee732
--- /dev/null
+++ b/external/gard/test/tests/01-show_1
@@ -0,0 +1,10 @@
+#! /bin/sh
+
+run_binary "./gard" "-p -e -f test/files/data1.bin show 1"
+if [ "$?" -ne 0 ] ; then
+ fail_test
+fi
+
+diff_with_result
+
+pass_test
diff --git a/external/gard/test/tests/02-usage b/external/gard/test/tests/02-usage
new file mode 100644
index 0000000..45e5e02
--- /dev/null
+++ b/external/gard/test/tests/02-usage
@@ -0,0 +1,12 @@
+#! /bin/sh
+
+run_binary "./gard"
+if [ "$?" -ne 1 ] ; then
+ fail_test
+fi
+
+strip_version_from_result "gard"
+
+diff_with_result
+
+pass_test
diff --git a/external/mambo/mambo_utils.tcl b/external/mambo/mambo_utils.tcl
index ae287dc..5e97302 100644
--- a/external/mambo/mambo_utils.tcl
+++ b/external/mambo/mambo_utils.tcl
@@ -4,25 +4,25 @@
#
proc p { reg { t 0 } { c 0 } } {
switch -regexp $reg {
- ^r[0-9]+$ {
- regexp "r(\[0-9\]*)" $reg dummy num
- set val [mysim cpu $c thread $t display gpr $num]
- puts "$val"
- }
- ^f[0-9]+$ {
- regexp "f(\[0-9\]*)" $reg dummy num
- set val [mysim cpu $c thread $t display fpr $num]
- puts "$val"
- }
- ^v[0-9]+$ {
- regexp "v(\[0-9\]*)" $reg dummy num
- set val [mysim cpu $c thread $t display vmxr $num]
- puts "$val"
- }
- default {
- set val [mysim cpu $c thread $t display spr $reg]
- puts "$val"
- }
+ ^r[0-9]+$ {
+ regexp "r(\[0-9\]*)" $reg dummy num
+ set val [mysim cpu $c thread $t display gpr $num]
+ puts "$val"
+ }
+ ^f[0-9]+$ {
+ regexp "f(\[0-9\]*)" $reg dummy num
+ set val [mysim cpu $c thread $t display fpr $num]
+ puts "$val"
+ }
+ ^v[0-9]+$ {
+ regexp "v(\[0-9\]*)" $reg dummy num
+ set val [mysim cpu $c thread $t display vmxr $num]
+ puts "$val"
+ }
+ default {
+ set val [mysim cpu $c thread $t display spr $reg]
+ puts "$val"
+ }
}
}
@@ -31,21 +31,21 @@ proc p { reg { t 0 } { c 0 } } {
#
proc sr { reg val {t 0}} {
switch -regexp $reg {
- ^r[0-9]+$ {
- regexp "r(\[0-9\]*)" $reg dummy num
- mysim cpu 0:$t set gpr $num $val
- }
- ^f[0-9]+$ {
- regexp "f(\[0-9\]*)" $reg dummy num
- mysim cpu 0:$t set fpr $num $val
- }
- ^v[0-9]+$ {
- regexp "v(\[0-9\]*)" $reg dummy num
- mysim cpu 0:$t set vmxr $num $val
- }
- default {
- mysim cpu 0:$t set spr $reg $val
- }
+ ^r[0-9]+$ {
+ regexp "r(\[0-9\]*)" $reg dummy num
+ mysim cpu 0:$t set gpr $num $val
+ }
+ ^f[0-9]+$ {
+ regexp "f(\[0-9\]*)" $reg dummy num
+ mysim cpu 0:$t set fpr $num $val
+ }
+ ^v[0-9]+$ {
+ regexp "v(\[0-9\]*)" $reg dummy num
+ mysim cpu 0:$t set vmxr $num $val
+ }
+ default {
+ mysim cpu 0:$t set spr $reg $val
+ }
}
p $reg $t
}
@@ -81,9 +81,9 @@ proc ipca { } {
set threads [myconf query processor/number_of_threads]
for { set i 0 } { $i < $cpus } { incr i 1 } {
- for { set j 0 } { $j < $threads } { incr j 1 } {
- puts [ipc $j $i]
- }
+ for { set j 0 } { $j < $threads } { incr j 1 } {
+ puts [ipc $j $i]
+ }
}
}
@@ -92,10 +92,10 @@ proc pa { spr } {
set threads [myconf query processor/number_of_threads]
for { set i 0 } { $i < $cpus } { incr i 1 } {
- for { set j 0 } { $j < $threads } { incr j 1 } {
- set val [mysim cpu $i thread $j display spr $spr]
- puts "CPU: $i THREAD: $j SPR $spr = $val"
- }
+ for { set j 0 } { $j < $threads } { incr j 1 } {
+ set val [mysim cpu $i thread $j display spr $spr]
+ puts "CPU: $i THREAD: $j SPR $spr = $val"
+ }
}
}
@@ -106,16 +106,16 @@ proc s { } {
proc z { count } {
while { $count > 0 } {
- s
- incr count -1
+ s
+ incr count -1
}
}
proc sample_pc { sample count } {
while { $count > 0 } {
- mysim cycle $sample
- ipc
- incr count -1
+ mysim cycle $sample
+ ipc
+ incr count -1
}
}
@@ -130,10 +130,10 @@ proc x { pa { size 8 } } {
}
proc it { ea } {
- mysim util itranslate $ea
+ mysim util itranslate $ea
}
proc dt { ea } {
- mysim util dtranslate $ea
+ mysim util dtranslate $ea
}
proc ex { ea { size 8 } } {
@@ -146,17 +146,17 @@ proc hexdump { location count } {
set addr [expr $location & 0xfffffffffffffff0]
set top [expr $addr + ($count * 15)]
for { set i $addr } { $i < $top } { incr i 16 } {
- set val [expr $i + (4 * 0)]
- set val0 [format "%08x" [mysim memory display $val 4]]
- set val [expr $i + (4 * 1)]
- set val1 [format "%08x" [mysim memory display $val 4]]
- set val [expr $i + (4 * 2)]
- set val2 [format "%08x" [mysim memory display $val 4]]
- set val [expr $i + (4 * 3)]
- set val3 [format "%08x" [mysim memory display $val 4]]
- set ascii "(none)"
- set loc [format "0x%016x" $i]
- puts "$loc: $val0 $val1 $val2 $val3 $ascii"
+ set val [expr $i + (4 * 0)]
+ set val0 [format "%08x" [mysim memory display $val 4]]
+ set val [expr $i + (4 * 1)]
+ set val1 [format "%08x" [mysim memory display $val 4]]
+ set val [expr $i + (4 * 2)]
+ set val2 [format "%08x" [mysim memory display $val 4]]
+ set val [expr $i + (4 * 3)]
+ set val3 [format "%08x" [mysim memory display $val 4]]
+ set ascii "(none)"
+ set loc [format "0x%016x" $i]
+ puts "$loc: $val0 $val1 $val2 $val3 $ascii"
}
}
@@ -186,18 +186,18 @@ proc st { count } {
set lr [mysim cpu 0 display spr lr]
i $lr
while { $count > 0 } {
- set sp [mysim util itranslate $sp]
- set lr [mysim memory display [expr $sp++16] 8]
- i $lr
- set sp [mysim memory display $sp 8]
+ set sp [mysim util itranslate $sp]
+ set lr [mysim memory display [expr $sp++16] 8]
+ i $lr
+ set sp [mysim memory display $sp 8]
- incr count -1
+ incr count -1
}
}
proc mywatch { } {
while { [mysim memory display 0x700 8] != 0 } {
- mysim cycle 1
+ mysim cycle 1
}
puts "condition occured "
ipc
@@ -222,23 +222,23 @@ proc egdb { {t 0} } {
proc bt { {sp 0} } {
set t 0
if { $sp < 16 } {
- set t $sp
- set sp 0
+ set t $sp
+ set sp 0
}
if { $sp == 0 } {
- set sp [mysim cpu 0:$t display gpr 1]
+ set sp [mysim cpu 0:$t display gpr 1]
}
set lr [mysim cpu 0:$t display spr lr]
puts "backtrace thread $t, stack $sp"
i $lr
while { 1 == 1 } {
- set pa [ mysim util dtranslate $sp ]
- set bc [ mysim memory display $pa 8 ]
- set cr [ mysim memory display [expr $pa+8] 8 ]
- set lr [ mysim memory display [expr $pa+16] 8 ]
- i $lr
- if { $bc == 0 } { return }
- set sp $bc
+ set pa [ mysim util dtranslate $sp ]
+ set bc [ mysim memory display $pa 8 ]
+ set cr [ mysim memory display [expr $pa+8] 8 ]
+ set lr [ mysim memory display [expr $pa+16] 8 ]
+ i $lr
+ if { $bc == 0 } { return }
+ set sp $bc
}
}
diff --git a/external/mambo/skiboot.tcl b/external/mambo/skiboot.tcl
index 2d14be6..d3b4120 100644
--- a/external/mambo/skiboot.tcl
+++ b/external/mambo/skiboot.tcl
@@ -17,12 +17,15 @@ mconfig stop_on_ill MAMBO_STOP_ON_ILL false
# Location of application binary to load
mconfig boot_image SKIBOOT ../../skiboot.lid
+if { [info exists env(SKIBOOT)] } {
+ mconfig boot_image SKIBOOT env(SKIBOOT)
+}
# Boot: Memory location to load boot_image, for binary or vmlinux
-mconfig boot_load MAMBO_BOOT_LOAD 0
+mconfig boot_load MAMBO_BOOT_LOAD 0x30000000
# Boot: Value of PC after loading, for binary or vmlinux
-mconfig boot_pc MAMBO_BOOT_PC 0x10
+mconfig boot_pc MAMBO_BOOT_PC 0x30000010
# Payload: Allow for a Linux style ramdisk/initrd
if { ![info exists env(SKIBOOT_ZIMAGE)] } {
@@ -58,8 +61,10 @@ mconfig tap_base MAMBO_NET_TAP_BASE 0
#
# Create machine config
#
-
-define dup pegasus myconf
+if { ! [info exists env(SIMHOST)] } {
+ set env(SIMHOST) "pegasus"
+}
+define dup $env(SIMHOST) myconf
myconf config processor/number_of_threads $mconf(threads)
myconf config memory_size $mconf(memory)
myconf config processor_option/ATTN_STOP true
@@ -71,6 +76,15 @@ myconf config processor/cpu_frequency 512M
myconf config processor/timebase_frequency 1/1
myconf config enable_pseries_nvram false
+# We need to be DD2 or greater on p8 for the HILE HID bit.
+if { $env(SIMHOST) == "pegasus" } {
+ myconf config processor/initial/PVR 0x4b0201
+}
+
+if { [info exists env(SKIBOOT_SIMCONF)] } {
+ source $env(SKIBOOT_SIMCONF)
+}
+
define machine myconf mysim
#
@@ -137,6 +151,8 @@ mysim of addprop $cpus_node int "#size-cells" 0
set cpu0_node [mysim of find_device "/cpus/PowerPC@0"]
mysim of addprop $cpu0_node int "ibm,chip-id" 0
+set reg [list 0x0000001c00000028 0xffffffffffffffff]
+mysim of addprop $cpu0_node array64 "ibm,processor-segment-sizes" reg
set mem0_node [mysim of find_device "/memory@0"]
mysim of addprop $mem0_node int "ibm,chip-id" 0
@@ -154,6 +170,17 @@ lappend compat "ibm,power8-xscom"
set compat [of::encode_compat $compat]
mysim of addprop $xscom_node byte_array "compatible" $compat
+if { [info exists env(SKIBOOT_INITRD)] } {
+ set cpio_file $env(SKIBOOT_INITRD)
+ set chosen_node [mysim of find_device /chosen]
+ set cpio_size [file size $cpio_file]
+ set cpio_start 0x10000000
+ set cpio_end [expr $cpio_start + $cpio_size]
+ mysim of addprop $chosen_node int "linux,initrd-start" $cpio_start
+ mysim of addprop $chosen_node int "linux,initrd-end" $cpio_end
+ mysim mcm 0 memory fread $cpio_start $cpio_size $cpio_file
+}
+
# Flatten it
epapr::of2dtb mysim $mconf(epapr_dt_addr)
@@ -171,9 +198,12 @@ mysim memory fread $mconf(payload_addr) $payload_size $mconf(payload)
for { set i 0 } { $i < $mconf(threads) } { incr i } {
mysim mcm 0 cpu 0 thread $i set spr pc $mconf(boot_pc)
mysim mcm 0 cpu 0 thread $i set gpr 3 $mconf(epapr_dt_addr)
- mysim mcm 0 cpu 0 thread $i set spr pvr 0x4b0201
mysim mcm 0 cpu 0 thread $i config_on
}
# Turbo mode & run
mysim mode turbo
+
+if { [info exists env(SKIBOOT_AUTORUN)] } {
+ mysim go
+}
diff --git a/external/opal-prd/Makefile b/external/opal-prd/Makefile
index db38d71..98c7b48 100644
--- a/external/opal-prd/Makefile
+++ b/external/opal-prd/Makefile
@@ -10,6 +10,11 @@ sbindir = $(prefix)/sbin
datadir = $(prefix)/share
mandir = $(datadir)/man
+all: opal-prd
+
+GET_ARCH = ../../external/common/get_arch.sh
+include ../../external/common/rules.mk
+
# Use make V=1 for a verbose build.
ifndef V
Q_CC= @echo ' CC ' $@;
@@ -18,12 +23,14 @@ ifndef V
Q_MKDIR=@echo ' MKDIR ' $@;
endif
-OBJS = opal-prd.o thunk.o pnor.o i2c.o module.o version.o \
- blocklevel.o libffs.o libflash.o ecc.o
+LIBFLASH_OBJS = libflash-blocklevel.o libflash-libffs.o \
+ libflash-libflash.o libflash-ecc.o \
+ libflash-file.o
-all: opal-prd
+OBJS = opal-prd.o thunk.o pnor.o i2c.o module.o version.o \
+ $(LIBFLASH_OBJS) common-arch_flash.o
-LINKS = ccan
+LINKS = ccan common libflash $(ARCH_LINKS)
OPAL_PRD_VERSION ?= $(shell ../../make_version.sh opal-prd)
@@ -34,6 +41,12 @@ endif
ccan:
$(Q_LN)ln -sfr ../../ccan ./ccan
+libflash:
+ $(Q_LN)ln -sfr ../../libflash ./libflash
+
+common:
+ $(Q_LN)ln -sfr ../common ./common
+
asm/opal-prd.h:
$(Q_MKDIR)mkdir -p asm
$(Q_LN)ln -sfr $(KERNEL_DIR)/arch/powerpc/include/uapi/asm/opal-prd.h \
@@ -44,7 +57,7 @@ $(OBJS): $(LINKS)
%.o: %.c
$(Q_CC)$(COMPILE.c) $< -o $@
-%.o: ../../libflash/%.c
+$(LIBFLASH_OBJS): libflash-%.o : libflash/%.c
$(Q_CC)$(COMPILE.c) $< -o $@
%.o: %.S
@@ -66,9 +79,9 @@ version.c: ../../make_version.sh .version
@cmp -s $@ $@.tmp || cp $@.tmp $@
@rm -f $@.tmp
-test: test/test_pnor
+test: $(LINKS) test/test_pnor
-test/test_pnor: test/test_pnor.o pnor.o libflash/libflash.o libflash/libffs.o
+test/test_pnor: test/test_pnor.o pnor.o $(LIBFLASH_OBJS) common-arch_flash.o
$(Q_LINK)$(LINK.o) -o $@ $^
install: all
diff --git a/external/opal-prd/opal-prd.c b/external/opal-prd/opal-prd.c
index c58ee51..f80e74c 100644
--- a/external/opal-prd/opal-prd.c
+++ b/external/opal-prd/opal-prd.c
@@ -631,7 +631,7 @@ int hservice_memory_error(uint64_t i_start_addr, uint64_t i_endAddr,
return 0;
}
-void hservices_init(struct opal_prd_ctx *ctx, void *code)
+int hservices_init(struct opal_prd_ctx *ctx, void *code)
{
uint64_t *s, *d;
int i, sz;
@@ -644,12 +644,19 @@ void hservices_init(struct opal_prd_ctx *ctx, void *code)
hbrt_entry.addr = (void *)htobe64((unsigned long)code + 0x100);
hbrt_entry.toc = 0; /* No toc for init entry point */
- if (memcmp(code, "HBRTVERS", 8) != 0)
+ if (memcmp(code, "HBRTVERS", 8) != 0) {
pr_log(LOG_ERR, "IMAGE: Bad signature for "
"ibm,hbrt-code-image! exiting");
+ return -1;
+ }
pr_debug("IMAGE: calling ibm,hbrt_init()");
hservice_runtime = call_hbrt_init(&hinterface);
+ if (!hservice_runtime) {
+ pr_log(LOG_ERR, "IMAGE: hbrt_init failed, exiting");
+ return -1;
+ }
+
pr_log(LOG_NOTICE, "IMAGE: hbrt_init complete, version %016lx",
hservice_runtime->interface_version);
@@ -659,6 +666,8 @@ void hservices_init(struct opal_prd_ctx *ctx, void *code)
/* Byte swap the function pointers */
for (i = 0; i < sz; i++)
d[i] = be64toh(s[i]);
+
+ return 0;
}
static void fixup_hinterface_table(void)
@@ -1537,6 +1546,8 @@ static int run_prd_daemon(struct opal_prd_ctx *ctx)
/* log to syslog */
pr_log_daemon_init();
+ pr_debug("CTRL: Starting PRD daemon\n");
+
ctx->fd = -1;
ctx->socket = -1;
@@ -1592,18 +1603,20 @@ static int run_prd_daemon(struct opal_prd_ctx *ctx)
fixup_hinterface_table();
- if (ctx->pnor.path) {
- rc = pnor_init(&ctx->pnor);
- if (rc) {
- pr_log(LOG_ERR, "PNOR: Failed to open pnor: %m");
- goto out_close;
- }
+ rc = pnor_init(&ctx->pnor);
+ if (rc) {
+ pr_log(LOG_ERR, "PNOR: Failed to open pnor: %m");
+ goto out_close;
}
ipmi_init(ctx);
pr_debug("HBRT: calling hservices_init");
- hservices_init(ctx, ctx->code_addr);
+ rc = hservices_init(ctx, ctx->code_addr);
+ if (rc) {
+		pr_log(LOG_ERR, "HBRT: Can't initialise HBRT");
+ goto out_close;
+ }
pr_debug("HBRT: hservices_init done");
/* Test a scom */
@@ -1619,6 +1632,7 @@ static int run_prd_daemon(struct opal_prd_ctx *ctx)
rc = 0;
out_close:
+	pr_debug("CTRL: Stopping PRD daemon\n");
pnor_close(&ctx->pnor);
if (ctx->fd != -1)
close(ctx->fd);
diff --git a/external/opal-prd/opal-prd.service b/external/opal-prd/opal-prd.service
new file mode 100644
index 0000000..ce4f47a
--- /dev/null
+++ b/external/opal-prd/opal-prd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=OPAL PRD daemon
+Requires=opal-prd.socket
+
+[Service]
+StandardInput=socket
+ExecStart=/usr/sbin/opal-prd --pnor /dev/mtd0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/external/opal-prd/opal-prd.socket b/external/opal-prd/opal-prd.socket
new file mode 100644
index 0000000..e51b9d3
--- /dev/null
+++ b/external/opal-prd/opal-prd.socket
@@ -0,0 +1,8 @@
+[Unit]
+Description=OPAL PRD Listen Socket
+
+[Socket]
+ListenStream=/run/opal-prd-control
+
+[Install]
+WantedBy=sockets.target
diff --git a/external/opal-prd/pnor.c b/external/opal-prd/pnor.c
index 43814ab..0e7e5c0 100644
--- a/external/opal-prd/pnor.c
+++ b/external/opal-prd/pnor.c
@@ -15,6 +15,8 @@
*/
#include <libflash/libffs.h>
+#include <common/arch_flash.h>
+
#include <errno.h>
#include <sys/stat.h>
@@ -32,49 +34,34 @@
int pnor_init(struct pnor *pnor)
{
- int rc, fd;
- mtd_info_t mtd_info;
+ int rc;
if (!pnor)
return -1;
- /* Open device and ffs */
- fd = open(pnor->path, O_RDWR);
- if (fd < 0) {
- perror(pnor->path);
+ rc = arch_flash_init(&(pnor->bl), pnor->path, false);
+ if (rc) {
+ pr_log(LOG_ERR, "PNOR: Flash init failed");
return -1;
}
- /* Hack so we can test on non-mtd file descriptors */
-#if defined(__powerpc__)
- rc = ioctl(fd, MEMGETINFO, &mtd_info);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: ioctl failed to get pnor info: %m");
- goto out;
- }
- pnor->size = mtd_info.size;
- pnor->erasesize = mtd_info.erasesize;
-#else
- pnor->size = lseek(fd, 0, SEEK_END);
- if (pnor->size < 0) {
- perror(pnor->path);
+ rc = blocklevel_get_info(pnor->bl, NULL, &(pnor->size), &(pnor->erasesize));
+ if (rc) {
+ pr_log(LOG_ERR, "PNOR: blocklevel_get_info() failed. Can't use PNOR");
goto out;
}
- /* Fake it */
- pnor->erasesize = 1024;
-#endif
-
- pr_debug("PNOR: Found PNOR: %d bytes (%d blocks)", pnor->size,
- pnor->erasesize);
- rc = ffs_open_image(fd, pnor->size, 0, &pnor->ffsh);
- if (rc)
+ rc = ffs_init(0, pnor->size, pnor->bl, &pnor->ffsh, 0);
+ if (rc) {
pr_log(LOG_ERR, "PNOR: Failed to open pnor partition table");
+ goto out;
+ }
+ return 0;
out:
- close(fd);
-
- return rc;
+ arch_flash_close(pnor->bl, pnor->path);
+ pnor->bl = NULL;
+ return -1;
}
void pnor_close(struct pnor *pnor)
@@ -85,6 +72,9 @@ void pnor_close(struct pnor *pnor)
if (pnor->ffsh)
ffs_close(pnor->ffsh);
+ if (pnor->bl)
+ arch_flash_close(pnor->bl, pnor->path);
+
if (pnor->path)
free(pnor->path);
}
@@ -107,159 +97,36 @@ void dump_parts(struct ffs_handle *ffs) {
}
}
-static int mtd_write(struct pnor *pnor, int fd, void *data, uint64_t offset,
+static int mtd_write(struct pnor *pnor, void *data, uint64_t offset,
size_t len)
{
- int write_start, write_len, start_waste, rc;
- bool end_waste = false;
- uint8_t *buf;
- struct erase_info_user erase;
+ int rc;
if (len > pnor->size || offset > pnor->size ||
len + offset > pnor->size)
return -ERANGE;
- start_waste = offset % pnor->erasesize;
- write_start = offset - start_waste;
-
- /* Align size to multiple of block size */
- write_len = (len + start_waste) & ~(pnor->erasesize - 1);
- if ((len + start_waste) > write_len) {
- end_waste = true;
- write_len += pnor->erasesize;
- }
-
- buf = malloc(write_len);
-
- if (start_waste) {
- rc = lseek(fd, write_start, SEEK_SET);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: lseek write_start(0x%x) "
- "failed; %m", write_start);
- goto out;
- }
-
- rc = read(fd, buf, pnor->erasesize);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: read(0x%x bytes) failed: %m",
- pnor->erasesize);
- goto out;
- }
- }
-
- if (end_waste) {
- rc = lseek(fd, write_start + write_len - pnor->erasesize,
- SEEK_SET);
- if (rc < 0) {
- perror("lseek last write block");
- pr_log(LOG_ERR, "PNOR: lseek last write block(0x%x) "
- "failed; %m",
- write_start + write_len -
- pnor->erasesize);
- goto out;
- }
-
- rc = read(fd, buf + write_len - pnor->erasesize, pnor->erasesize);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: read(0x%x bytes) failed: %m",
- pnor->erasesize);
- goto out;
- }
- }
-
- /* Put data in the correct spot */
- memcpy(buf + start_waste, data, len);
-
- /* Not sure if this is required */
- rc = lseek(fd, 0, SEEK_SET);
- if (rc < 0) {
- pr_log(LOG_NOTICE, "PNOR: lseek(0) failed: %m");
- goto out;
- }
-
- /* Erase */
- erase.start = write_start;
- erase.length = write_len;
-
- rc = ioctl(fd, MEMERASE, &erase);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: erase(start 0x%x, len 0x%x) ioctl "
- "failed: %m", write_start, write_len);
- goto out;
- }
-
- /* Write */
- rc = lseek(fd, write_start, SEEK_SET);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: lseek write_start(0x%x) failed: %m",
- write_start);
- goto out;
- }
-
- rc = write(fd, buf, write_len);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: write(0x%x bytes) failed: %m",
- write_len);
- goto out;
- }
-
- /* We have succeded, report the requested write size */
- rc = len;
+ rc = blocklevel_smart_write(pnor->bl, offset, data, len);
+ if (rc)
+ return -errno;
-out:
- free(buf);
- return rc;
+ return len;
}
-static int mtd_read(struct pnor *pnor, int fd, void *data, uint64_t offset,
+static int mtd_read(struct pnor *pnor, void *data, uint64_t offset,
size_t len)
{
- int read_start, read_len, start_waste, rc;
- int mask = pnor->erasesize - 1;
- void *buf;
+ int rc;
if (len > pnor->size || offset > pnor->size ||
len + offset > pnor->size)
return -ERANGE;
- /* Align start to erase block size */
- start_waste = offset % pnor->erasesize;
- read_start = offset - start_waste;
-
- /* Align size to multiple of block size */
- read_len = (len + start_waste) & ~mask;
- if ((len + start_waste) > read_len)
- read_len += pnor->erasesize;
-
- /* Ensure read is not out of bounds */
- if (read_start + read_len > pnor->size) {
- pr_log(LOG_ERR, "PNOR: read out of bounds");
- return -ERANGE;
- }
-
- buf = malloc(read_len);
-
- rc = lseek(fd, read_start, SEEK_SET);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: lseek read_start(0x%x) failed: %m",
- read_start);
- goto out;
- }
-
- rc = read(fd, buf, read_len);
- if (rc < 0) {
- pr_log(LOG_ERR, "PNOR: write(offset 0x%x, len 0x%x) "
- "failed: %m", read_start, read_len);
- goto out;
- }
+ rc = blocklevel_read(pnor->bl, offset, data, len);
+ if (rc)
+ return -errno;
- /* Copy data into destination, carefully avoiding the extra data we
- * added to align to block size */
- memcpy(data, buf + start_waste, len);
- rc = len;
-out:
- free(buf);
- return rc;
+ return len;
}
/* Similar to read(2), this performs partial operations where the number of
@@ -269,7 +136,7 @@ out:
int pnor_operation(struct pnor *pnor, const char *name, uint64_t offset,
void *data, size_t requested_size, enum pnor_op op)
{
- int rc, fd;
+ int rc;
uint32_t pstart, psize, idx;
int size;
@@ -315,18 +182,12 @@ int pnor_operation(struct pnor *pnor, const char *name, uint64_t offset,
return -ERANGE;
}
- fd = open(pnor->path, O_RDWR);
- if (fd < 0) {
- perror(pnor->path);
- return fd;
- }
-
switch (op) {
case PNOR_OP_READ:
- rc = mtd_read(pnor, fd, data, pstart + offset, size);
+ rc = mtd_read(pnor, data, pstart + offset, size);
break;
case PNOR_OP_WRITE:
- rc = mtd_write(pnor, fd, data, pstart + offset, size);
+ rc = mtd_write(pnor, data, pstart + offset, size);
break;
default:
rc = -EIO;
@@ -342,7 +203,5 @@ int pnor_operation(struct pnor *pnor, const char *name, uint64_t offset,
rc, size);
out:
- close(fd);
-
return rc;
}
diff --git a/external/opal-prd/pnor.h b/external/opal-prd/pnor.h
index 06219dc..729a969 100644
--- a/external/opal-prd/pnor.h
+++ b/external/opal-prd/pnor.h
@@ -2,12 +2,14 @@
#define PNOR_H
#include <libflash/libffs.h>
+#include <libflash/blocklevel.h>
struct pnor {
char *path;
struct ffs_handle *ffsh;
uint32_t size;
uint32_t erasesize;
+ struct blocklevel_device *bl;
};
enum pnor_op {
diff --git a/external/opal-prd/test/test_pnor.c b/external/opal-prd/test/test_pnor.c
index f4b0a6d..84e4231 100644
--- a/external/opal-prd/test/test_pnor.c
+++ b/external/opal-prd/test/test_pnor.c
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -25,6 +26,15 @@
extern void dump_parts(struct ffs_handle *ffs);
+void pr_log(int priority, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
int main(int argc, char **argv)
{
struct pnor pnor;
diff --git a/external/pflash/.gitignore b/external/pflash/.gitignore
new file mode 100644
index 0000000..edec7db
--- /dev/null
+++ b/external/pflash/.gitignore
@@ -0,0 +1,5 @@
+ccan
+common
+libflash
+make_version.sh
+pflash
diff --git a/external/pflash/Makefile b/external/pflash/Makefile
index f0c3773..9d7c183 100644
--- a/external/pflash/Makefile
+++ b/external/pflash/Makefile
@@ -1,3 +1,11 @@
+# Use make V=1 for a verbose build.
+ifndef V
+ Q_CC= @echo ' CC ' $@;
+ Q_LD= @echo ' LD ' $@;
+ Q_LN= @echo ' LN ' $@;
+ Q_MKDIR=@echo ' MKDIR ' $@;
+endif
+
include rules.mk
GET_ARCH = ../../external/common/get_arch.sh
include ../../external/common/rules.mk
@@ -5,19 +13,16 @@ include ../../external/common/rules.mk
all: $(EXE)
.PHONY: links
-links: libflash ccan common make_version.sh
+links: libflash ccan common
libflash:
- ln -sf ../../libflash .
+ $(Q_LN)ln -sf ../../libflash .
ccan:
- ln -sf ../../ccan .
+ $(Q_LN)ln -sf ../../ccan .
common:
- ln -sf ../common .
-
-make_version.sh:
- ln -sf ../../make_version.sh
+ $(Q_LN)ln -sf ../common .
$(OBJS): | links arch_links
@@ -27,6 +32,9 @@ $(OBJS): | links arch_links
@cmp -s $@ $@.tmp || cp $@.tmp $@
@rm -f $@.tmp
+install: all
+ install -D pflash $(DESTDIR)$(sbindir)/pflash
+
.PHONY: dist
#File is named $(PFLASH_VERSION).tar because the expectation is that pflash-
#is always at the start of the verion. This remains consistent with skiboot
@@ -38,9 +46,11 @@ dist: links .version
../pflash/.version ../pflash/make_version.sh \
../pflash/common/*
+.PHONY: clean
clean: arch_clean
- rm -f $(OBJS) $(EXE) *.o *.d libflash/test/test_flash libflash/test/*.o
+ rm -f $(OBJS) $(EXE) *.o *.d
+.PHONY: distclean
distclean: clean
rm -f *.c~ *.h~ *.sh~ Makefile~ config.mk~ libflash/*.c~ libflash/*.h~
rm -f libflash ccan .version .version.tmp
- rm -f common io.h make_version.sh
+ rm -f common io.h
diff --git a/external/pflash/ast.h b/external/pflash/ast.h
deleted file mode 100644
index 1910eb4..0000000
--- a/external/pflash/ast.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef __AST_H
-#define __AST_H
-
-/*
- * AHB bus registers
- */
-
-/* SPI Flash controller #1 (BMC) */
-#define BMC_SPI_FCTL_BASE 0x1E620000
-#define BMC_SPI_FCTL_CTRL (BMC_SPI_FCTL_BASE + 0x10)
-#define BMC_SPI_FREAD_TIMING (BMC_SPI_FCTL_BASE + 0x94)
-#define BMC_FLASH_BASE 0x20000000
-
-/* SPI Flash controller #2 (PNOR) */
-#define PNOR_SPI_FCTL_BASE 0x1E630000
-#define PNOR_SPI_FCTL_CONF (PNOR_SPI_FCTL_BASE + 0x00)
-#define PNOR_SPI_FCTL_CTRL (PNOR_SPI_FCTL_BASE + 0x04)
-#define PNOR_SPI_FREAD_TIMING (PNOR_SPI_FCTL_BASE + 0x14)
-#define PNOR_FLASH_BASE 0x30000000
-
-/* LPC registers */
-#define LPC_BASE 0x1e789000
-#define LPC_HICR6 (LPC_BASE + 0x80)
-#define LPC_HICR7 (LPC_BASE + 0x88)
-#define LPC_HICR8 (LPC_BASE + 0x8c)
-
-/* SCU registers */
-#define SCU_BASE 0x1e6e2000
-#define SCU_HW_STRAPPING (SCU_BASE + 0x70)
-
-/*
- * AHB Accessors
- */
-#ifndef __SKIBOOT__
-#include "common/io.h"
-#else
-
-/*
- * Register accessors, return byteswapped values
- * (IE. LE registers)
- */
-void ast_ahb_writel(uint32_t val, uint32_t reg);
-uint32_t ast_ahb_readl(uint32_t reg);
-
-/*
- * copy to/from accessors. Cannot cross IDSEL boundaries (256M)
- */
-int ast_copy_to_ahb(uint32_t reg, const void *src, uint32_t len);
-int ast_copy_from_ahb(void *dst, uint32_t reg, uint32_t len);
-
-void ast_io_init(void);
-
-#endif /* __SKIBOOT__ */
-
-/*
- * SPI Flash controllers
- */
-#define AST_SF_TYPE_PNOR 0
-#define AST_SF_TYPE_BMC 1
-#define AST_SF_TYPE_MEM 2
-
-struct spi_flash_ctrl;
-int ast_sf_open(uint8_t type, struct spi_flash_ctrl **ctrl);
-void ast_sf_close(struct spi_flash_ctrl *ctrl);
-
-
-#endif /* __AST_H */
diff --git a/external/pflash/build-all-arch.sh b/external/pflash/build-all-arch.sh
new file mode 100755
index 0000000..5974fbc
--- /dev/null
+++ b/external/pflash/build-all-arch.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Script to build all pflash backends
+#
+# Copyright 2015 IBM Corp.
+# Licensed under the Apache License, Version 2.0
+#
+# pflash has three different backends that are used on powerpc, arm (BMC) and
+# x86 (file-backed). This script builds all of them in order to catch
+# regressions when touching shared code such as libflash.
+#
+# Defaults to the cross compilers available under Ubuntu. You can set the
+# environment variables arm_cc, amd64_cc, ppc64le_cc for other distributions.
+#
+# installing on x86:
+# apt-get install gcc-arm-linux-gnueabi gcc-powerpc64le-linux-gnu gcc
+#
+
+arm_cc=${arm_cc:-arm-linux-gnueabi-}
+amd64_cc=${amd64_cc:-x86_64-linux-gnu-}
+ppc64le_cc=${ppc64le_cc:-powerpc64le-linux-gnu-}
+
+echo "Building for ARM..."
+make clean && make distclean
+CROSS_COMPILE=${arm_cc} make || { echo "ARM build failed"; exit 1; }
+
+echo "Building for x86..."
+make clean && make distclean
+CROSS_COMPILE=${amd64_cc} make || { echo "x86 build failed"; exit 1; }
+
+echo "Building for ppc64le..."
+make clean && make distclean
+CROSS_COMPILE=${ppc64le_cc} make || { echo "ppc64le build failed"; exit 1; }
+
+make clean && make distclean
diff --git a/external/pflash/pflash.c b/external/pflash/pflash.c
index 057506e..c1d4949 100644
--- a/external/pflash/pflash.c
+++ b/external/pflash/pflash.c
@@ -317,6 +317,7 @@ static void program_file(const char *file, uint32_t start, uint32_t size)
if (dummy_run) {
printf("skipped (dummy)\n");
+ close(fd);
return;
}
@@ -721,7 +722,7 @@ int main(int argc, char *argv[])
}
}
- if (arch_flash_init(&bl, NULL))
+ if (arch_flash_init(&bl, NULL, true))
exit(1);
on_exit(exiting, NULL);
diff --git a/external/pflash/rules.mk b/external/pflash/rules.mk
index d60e276..219e3d3 100644
--- a/external/pflash/rules.mk
+++ b/external/pflash/rules.mk
@@ -1,17 +1,19 @@
.DEFAULT_GOAL := all
-CFLAGS = -O2 -Wall -I.
-LDFLAGS = -lrt
+override CFLAGS += -O2 -Wall -I.
OBJS = pflash.o progress.o version.o
-OBJS += libflash/libflash.o libflash/libffs.o libflash/ecc.o libflash/blocklevel.o libflash/file.o
-OBJS += common/arch_flash.o
+LIBFLASH_FILES := libflash.c libffs.c ecc.c blocklevel.c file.c
+LIBFLASH_OBJS := $(addprefix libflash-, $(LIBFLASH_FILES:.c=.o))
+LIBFLASH_SRC := $(addprefix libflash/,$(LIBFLASH_FILES))
+OBJS += $(LIBFLASH_OBJS)
+OBJS += common-arch_flash.o
EXE = pflash
CC = $(CROSS_COMPILE)gcc
-PFLASH_VERSION ?= $(shell ./make_version.sh $(EXE))
+PFLASH_VERSION ?= $(shell ../../make_version.sh $(EXE))
-version.c: make_version.sh .version
+version.c: .version
@(if [ "a$(PFLASH_VERSION)" = "a" ]; then \
echo "#error You need to set PFLASH_VERSION environment variable" > $@ ;\
else \
@@ -19,8 +21,13 @@ version.c: make_version.sh .version
fi) > $@
%.o : %.c
- $(CC) $(CFLAGS) -c $< -o $@
+ $(Q_CC)$(CC) $(CFLAGS) -c $< -o $@
+
+$(LIBFLASH_SRC): | links
+
+$(LIBFLASH_OBJS): libflash-%.o : libflash/%.c
+ $(Q_CC)$(CC) $(CFLAGS) -c $< -o $@
$(EXE): $(OBJS)
- $(CC) $(CFLAGS) $^ $(LDFLAGS) -o $@
+ $(Q_CC)$(CC) $(CFLAGS) $^ -lrt -o $@
diff --git a/external/shared/Makefile b/external/shared/Makefile
new file mode 100644
index 0000000..ffc049f
--- /dev/null
+++ b/external/shared/Makefile
@@ -0,0 +1,57 @@
+.DEFAULT_GOAL := all
+CC ?= $(CROSS_COMPILE)gcc
+GET_ARCH = ../../external/common/get_arch.sh
+include ../../external/common/rules.mk
+
+PREFIX ?= /usr/local/
+LIBDIR = $(PREFIX)/lib
+INCDIR = $(PREFIX)/include/libflash
+
+VERSION = $(shell ../../make_version.sh)
+
+CFLAGS += -m64 -Werror -Wall -g2 -ggdb -I. -fPIC
+
+.PHONY: links
+links: libflash ccan common
+
+libflash:
+ ln -sf ../../libflash .
+
+common:
+ ln -sf ../common .
+
+ccan:
+ ln -sf ../../ccan .
+
+LIBFLASH_OBJS = libflash-file.o libflash-libflash.o libflash-libffs.o libflash-ecc.o libflash-blocklevel.o
+ARCHFLASH_OBJS = common-arch_flash.o
+OBJS = $(LIBFLASH_OBJS) $(ARCHFLASH_OBJS)
+
+LIBFLASH_H = libflash/file.h libflash/libflash.h libflash/libffs.h libflash/ffs.h libflash/ecc.h libflash/blocklevel.h libflash/errors.h
+ARCHFLASH_H = common/arch_flash.h
+
+$(LIBFLASH_OBJS) : libflash-%.o : libflash/%.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) -c $< -o $@
+
+clean:
+ rm -f $(OBJS) common-*.o *.so*
+
+distclean: clean
+ rm -f ccan libflash common
+
+all: links arch_links $(OBJS)
+ $(CC) -shared -Wl,-soname,libflash.so -o libflash.so.$(VERSION) $(OBJS)
+
+install-lib: all
+ install -D -m 0755 libflash.so.$(VERSION) $(LIBDIR)/libflash.so.$(VERSION)
+ ln -sf libflash.so.$(VERSION) $(LIBDIR)/libflash.so
+
+install-dev: links arch_links
+ mkdir -p $(INCDIR)
+ install -m 0644 $(LIBFLASH_H) $(ARCHFLASH_H) $(INCDIR)
+
+install: install-lib install-dev
+
+uninstall:
+ rm -f $(LIBDIR)/libflash*
+ rm -rf $(INCDIR)
diff --git a/external/shared/config.h b/external/shared/config.h
new file mode 100644
index 0000000..a132a01
--- /dev/null
+++ b/external/shared/config.h
@@ -0,0 +1,19 @@
+/* For CCAN */
+
+#include <endian.h>
+#include <byteswap.h>
+
+#define HAVE_TYPEOF 1
+#define HAVE_BUILTIN_TYPES_COMPATIBLE_P 1
+
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define HAVE_BIG_ENDIAN 0
+#define HAVE_LITTLE_ENDIAN 1
+#else
+#define HAVE_BIG_ENDIAN 1
+#define HAVE_LITTLE_ENDIAN 0
+#endif
+
+#define HAVE_BYTESWAP_H 1
+#define HAVE_BSWAP_64 1
diff --git a/external/test/test.sh b/external/test/test.sh
new file mode 100755
index 0000000..cfea786
--- /dev/null
+++ b/external/test/test.sh
@@ -0,0 +1,101 @@
+#! /bin/sh
+
+# Copyright 2013-2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+run_binary() {
+ if [ -x "$1" ] ; then
+ $VALGRIND "$1" $2 2>> $STDERR_OUT 1>> $STDOUT_OUT
+ else
+ echo "Fatal error, cannot execute binary '$1'. Did you make?";
+ exit 1;
+ fi
+}
+
+fail_test() {
+ rm -rf "$STDERR_OUT";
+ rm -rf "$STDOUT_OUT";
+ echo "$0 ($CUR_TEST): test failed";
+ exit ${1:-1};
+}
+
+pass_test() {
+ /bin/true;
+}
+
+strip_version_from_result() {
+ VERSION=$(./make_version.sh $1)
+ sed -i "s/${VERSION}/VERSION/" $STDERR_OUT
+ sed -i "s/${VERSION}/VERSION/" $STDOUT_OUT
+}
+
+diff_with_result() {
+	# Explicitly diff a file with an arbitrary result file
+ if [ "$#" -eq 1 ] ; then
+ if ! diff -u "$RESULT" "$1" ; then
+ fail_test;
+ fi
+ # Otherwise just diff result.out with stdout and result.err with stderr
+ else
+ if ! diff -u "${RESULT}.out" "$STDOUT_OUT" ; then
+ fail_test;
+ fi
+ if ! diff -u "${RESULT}.err" "$STDERR_OUT" ; then
+ fail_test;
+ fi
+ fi
+}
+
+run_tests() {
+ if [ $# -ne 2 ] ; then
+		echo "Usage: run_tests test_dir result_dir";
+ exit 1;
+ fi
+
+ all_tests="$1";
+ res_path="$2";
+
+ if [ ! -d "$res_path" ] ; then
+ echo "Result path isn't a valid directory";
+ exit 1;
+ fi
+
+ export STDERR_OUT=$(mktemp --tmpdir external-test-stderr.XXXXXX);
+ export STDOUT_OUT=$(mktemp --tmpdir external-test-stdout.XXXXXX);
+
+
+ for the_test in $all_tests; do
+ export CUR_TEST=$(basename $the_test)
+ export RESULT="$res_path/$CUR_TEST"
+
+ . "$the_test";
+ R="$?"
+ if [ "$R" -ne 0 ] ; then
+ fail_test "$R";
+ fi
+ #reset for next test
+ > "$STDERR_OUT";
+ > "$STDOUT_OUT";
+ done
+
+ rm -rf $STDERR_OUT;
+ rm -rf $STDOUT_OUT;
+
+ echo "$0 tests passed"
+
+ exit 0;
+}
+
diff --git a/hdata/hdif.c b/hdata/hdif.c
index 916b4dd..25c0000 100644
--- a/hdata/hdif.c
+++ b/hdata/hdif.c
@@ -22,7 +22,7 @@ const void *HDIF_get_idata(const struct HDIF_common_hdr *hdif, unsigned int di,
const struct HDIF_common_hdr *hdr = hdif;
const struct HDIF_idata_ptr *iptr;
- if (hdr->d1f0 != BE16_TO_CPU(0xd1f0)) {
+ if (be16_to_cpu(hdr->d1f0) != 0xd1f0) {
prerror("HDIF: Bad header format !\n");
return NULL;
}
diff --git a/hdata/hdif.h b/hdata/hdif.h
index fad7454..ef03522 100644
--- a/hdata/hdif.h
+++ b/hdata/hdif.h
@@ -73,7 +73,7 @@ struct HDIF_child_ptr {
#define HDIF_IDATA_PTR(_offset, _size) \
{ \
.offset = CPU_TO_BE32(_offset), \
- .size = _size, \
+ .size = CPU_TO_BE32(_size), \
}
static inline bool HDIF_check(const void *hdif, const char id[])
diff --git a/hdata/iohub.c b/hdata/iohub.c
index 7dae74c..7676adf 100644
--- a/hdata/iohub.c
+++ b/hdata/iohub.c
@@ -22,7 +22,6 @@
#include <ccan/str/str.h>
#include <ccan/array_size/array_size.h>
#include <device.h>
-#include <p5ioc2.h>
#include <p7ioc.h>
#include <vpd.h>
#include <inttypes.h>
@@ -48,9 +47,9 @@ static void io_add_common(struct dt_node *hn, const struct cechub_io_hub *hub)
*/
dt_add_property(hn, "ranges", NULL, 0);
dt_add_property_cells(hn, "ibm,gx-bar-1",
- hi32(hub->gx_ctrl_bar1), lo32(hub->gx_ctrl_bar1));
+ hi32(be64_to_cpu(hub->gx_ctrl_bar1)), lo32(be64_to_cpu(hub->gx_ctrl_bar1)));
dt_add_property_cells(hn, "ibm,gx-bar-2",
- hi32(hub->gx_ctrl_bar2), lo32(hub->gx_ctrl_bar2));
+ hi32(be64_to_cpu(hub->gx_ctrl_bar2)), lo32(be64_to_cpu(hub->gx_ctrl_bar2)));
/* Add presence detect if valid */
if (hub->flags & CECHUB_HUB_FLAG_FAB_BR0_PDT)
@@ -131,58 +130,6 @@ static void io_get_loc_code(const void *sp_iohubs, struct dt_node *hn, const cha
}
}
-static struct dt_node *io_add_p5ioc2(const struct cechub_io_hub *hub,
- const void *sp_iohubs)
-{
- struct dt_node *hn;
- uint64_t reg[2];
-
- const void *kwvpd;
- unsigned int kwvpd_sz;
-
- prlog(PR_DEBUG, " GX#%d BUID_Ext = 0x%x\n",
- be32_to_cpu(hub->gx_index),
- be32_to_cpu(hub->buid_ext));
- prlog(PR_DEBUG, " GX BAR 0 = 0x%016"PRIx64"\n",
- be64_to_cpu(hub->gx_ctrl_bar0));
- prlog(PR_DEBUG, " GX BAR 1 = 0x%016"PRIx64"\n",
- be64_to_cpu(hub->gx_ctrl_bar1));
- prlog(PR_DEBUG, " GX BAR 2 = 0x%016"PRIx64"\n",
- be64_to_cpu(hub->gx_ctrl_bar2));
- prlog(PR_DEBUG, " GX BAR 3 = 0x%016"PRIx64"\n",
- be64_to_cpu(hub->gx_ctrl_bar3));
- prlog(PR_DEBUG, " GX BAR 4 = 0x%016"PRIx64"\n",
- be64_to_cpu(hub->gx_ctrl_bar4));
-
- /* We assume SBAR == GX0 + some hard coded offset */
- reg[0] = cleanup_addr(be64_to_cpu(hub->gx_ctrl_bar0) + P5IOC2_REGS_OFFSET);
- reg[1] = 0x2000000;
-
- hn = dt_new_addr(dt_root, "io-hub", reg[0]);
- if (!hn)
- return NULL;
-
- dt_add_property(hn, "reg", reg, sizeof(reg));
- dt_add_property_strings(hn, "compatible", "ibm,p5ioc2");
-
- kwvpd = HDIF_get_idata(sp_iohubs, CECHUB_ASCII_KEYWORD_VPD, &kwvpd_sz);
- if (kwvpd && kwvpd != sp_iohubs) {
- /*
- * XX We don't know how to properly find the LXRn
- * record so for now we'll just try LXR0 and if not
- * found, we try LXR1
- */
- if (!io_get_lx_info(kwvpd, kwvpd_sz, 0, hn))
- io_get_lx_info(kwvpd, kwvpd_sz, 1, hn);
- } else
- prlog(PR_DEBUG, "CEC: P5IOC2 Keywords not found.\n");
-
- /* Get slots base loc code */
- io_get_loc_code(sp_iohubs, hn, "ibm,io-base-loc-code");
-
- return hn;
-}
-
static struct dt_node *io_add_p7ioc(const struct cechub_io_hub *hub,
const void *sp_iohubs)
{
@@ -207,8 +154,8 @@ static struct dt_node *io_add_p7ioc(const struct cechub_io_hub *hub,
be64_to_cpu(hub->gx_ctrl_bar4));
/* We only know about memory map 1 */
- if (hub->mem_map_vers != 1) {
- prerror("P7IOC: Unknown memory map %d\n", hub->mem_map_vers);
+ if (be32_to_cpu(hub->mem_map_vers) != 1) {
+ prerror("P7IOC: Unknown memory map %d\n", be32_to_cpu(hub->mem_map_vers));
/* We try to continue anyway ... */
}
@@ -354,7 +301,7 @@ static void io_add_p8_cec_vpd(const struct HDIF_common_hdr *sp_iohubs)
prlog(PR_WARNING, "CEC: IOKID count is 0 !\n");
return;
}
- if (iokids->count > 1) {
+ if (be32_to_cpu(iokids->count) > 1) {
prlog(PR_WARNING, "CEC: WARNING ! More than 1 IO KID !!! (%d)\n",
iokids->count);
/* Ignoring the additional ones */
@@ -379,213 +326,6 @@ static void io_add_p8_cec_vpd(const struct HDIF_common_hdr *sp_iohubs)
io_get_lx_info(kwvpd, kwvpd_sz, 0, dt_root);
}
-static struct dt_node *io_add_hea(const struct cechub_io_hub *hub,
- const void *sp_io)
-{
- struct dt_node *np, *gnp;
- uint64_t reg[2];
- unsigned int i, vpd_sz;
- uint8_t kw_sz;
- const void *iokid, *vpd, *ccin;
- const uint8_t *mac;
- const struct HDIF_child_ptr *iokids;
-
- /*
- * We have a table of supported dauther cards looked up
- * by CCIN. We don't use the 1008 slot map in the VPD.
- *
- * This is basically translated from BML and will do for
- * now especially since we don't really support p5ioc2
- * machine, this is just for lab use
- *
- * This is mostly untested on 10G ... we might need more
- * info about the PHY in that case
- */
- const struct hea_iocard {
- const char ccin[4];
- struct {
- uint32_t speed;
- uint16_t ports;
- uint16_t phy_id;
- } pg[2];
- } hea_iocards[] = {
- {
- .ccin = "1818", /* HV4 something */
- .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- },
- {
- .ccin = "1819", /* HV4 Titov Card */
- .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- },
- {
- .ccin = "1830", /* HV4 Sergei Card */
- .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 0 },
- .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 0 },
- },
- {
- .ccin = "181A", /* L4 Evans Card */
- .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- },
- {
- .ccin = "181B", /* L4 Weber Card */
- .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 0 },
- .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 0 },
- },
- {
- .ccin = "181C", /* HV4 Gibson Card */
- .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- },
- {
- .ccin = "2BC4", /* MR Riverside 2 */
- .pg[0] = { .speed = 1000, .ports = 1, .phy_id = 1 },
- .pg[1] = { .speed = 1000, .ports = 1, .phy_id = 1 },
- },
- {
- .ccin = "2BC5", /* MR Lions 2 */
- .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 1 },
- .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 1 },
- },
- {
- .ccin = "2BC6", /* MR Onion 2 */
- .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 1 },
- .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 1 },
- },
- {
- .ccin = "266D", /* Jupiter Bonzai */
- .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 1 },
- .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 1 },
- },
- /* The blade use an IO KID that's a bit oddball and seems to
- * represent the backplane itself, but let's use it anyway
- *
- * XXX Probably want a different PHY type !
- */
- {
- .ccin = "531C", /* P7 Blade */
- .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
- },
- };
- const struct hea_iocard *card = NULL;
-
- /* WARNING: This makes quite a lot of nasty assumptions
- * that appear to hold true on the few machines I care
- * about, which is good enough for now. We don't officially
- * support p5ioc2 anyway...
- */
-
- /* Get first IO KID, we only support one. Real support would
- * mean using the FRU ID and the SLCA to find the right "stuff"
- * but at this stage it's unnecessary
- */
- iokids = HDIF_child_arr(sp_io, CECHUB_CHILD_IO_KIDS);
- if (!CHECK_SPPTR(iokids)) {
- prerror("HEA: no IOKID in HDAT child array !\n");
- return NULL;
- }
- if (!iokids->count) {
- prerror("HEA: IOKID count is 0 !\n");
- return NULL;
- }
- if (iokids->count > 1) {
- prlog(PR_WARNING, "HEA: WARNING ! More than 1 IO KID !!! (%d)\n",
- iokids->count);
- }
- iokid = HDIF_child(sp_io, iokids, 0, "IO KID");
- if (!iokid) {
- prerror("HEA: Failed to retrieve IO KID 0 !\n");
- return NULL;
- }
-
- /* Grab VPD */
- vpd = HDIF_get_idata(iokid, IOKID_KW_VPD, &vpd_sz);
- if (!CHECK_SPPTR(vpd)) {
- prerror("HEA: Failed to retrieve VPD from IO KID !\n");
- return NULL;
- }
-
- /* Grab the MAC address */
- mac = vpd_find(vpd, vpd_sz, "VINI", "B1", &kw_sz);
- if (!mac || kw_sz < 8) {
- prerror("HEA: Failed to retrieve MAC Address !\n");
- return NULL;
- }
-
- /* Grab the CCIN (card ID) */
- ccin = vpd_find(vpd, vpd_sz, "VINI", "CC", &kw_sz);
- if (!ccin || kw_sz < 4) {
- prerror("HEA: Failed to retrieve CCIN !\n");
- return NULL;
- }
-
- /* Now we could try to parse the 1008 slot map etc... but instead
- * we'll do like BML and grab the CCIN & use it for known cards.
- * We also grab the MAC
- */
- for (i = 0; i < ARRAY_SIZE(hea_iocards) && !card; i++) {
- if (strncmp(hea_iocards[i].ccin, ccin, 4))
- continue;
- card = &hea_iocards[i];
- }
- if (!card) {
- prerror("HEA: Unknown CCIN 0x%.4s!\n", (const char *)ccin);
- return NULL;
- }
-
- /* Assume base address is BAR3 + 0x4000000000 */
- reg[0] = hub->gx_ctrl_bar3 + 0x4000000000;
- reg[1] = 0xc0000000;
-
- prlog(PR_DEBUG, "CEC: * Adding HEA to P5IOC2, assuming GBA=0x%llx\n",
- (long long)reg[0]);
- np = dt_new_addr(dt_root, "ibm,hea", reg[0]);
- if (!np)
- return NULL;
-
- dt_add_property(np, "reg", reg, sizeof(reg));
- dt_add_property_strings(np, "compatible", "ibm,p5ioc2-hea");
- dt_add_property_cells(np, "#address-cells", 1);
- dt_add_property_cells(np, "#size-cells", 0);
- dt_add_property(np, "ibm,vpd", vpd, vpd_sz);
- dt_add_property_cells(np, "#mac-address", mac[7]);
- dt_add_property(np, "mac-address-base", mac, 6);
- /* BUID is base + 0x30 */
- dt_add_property(np, "interrupt-controller", NULL, 0);
- dt_add_property_cells(np, "interrupt-base",
- ((hub->buid_ext << 9) | 0x30) << 4);
- dt_add_property_cells(np, "interrupt-max-count", 128);
-
- /* Always 2 port groups */
- for (i = 0; i < 2; i++) {
- unsigned int clause;
-
- switch(card->pg[i].speed) {
- case 1000:
- clause = 0x22;
- break;
- case 10000:
- clause = 0x45;
- break;
- default:
- /* Unused port group */
- continue;
- }
- gnp = dt_new_addr(np, "portgroup", i + 1);
- if (!gnp)
- continue;
-
- dt_add_property_cells(gnp, "reg", i + 1);
- dt_add_property_cells(gnp, "speed", card->pg[i].speed);
- /* XX FIXME */
- dt_add_property_strings(gnp, "phy-type", "mdio");
- dt_add_property_cells(gnp, "phy-mdio-addr", card->pg[i].phy_id);
- dt_add_property_cells(gnp, "phy-mdio-clause", clause);
- dt_add_property_cells(gnp, "subports", card->pg[i].ports);
- }
- return np;
-}
-
static void io_parse_fru(const void *sp_iohubs)
{
unsigned int i;
@@ -643,12 +383,6 @@ static void io_parse_fru(const void *sp_iohubs)
hn = io_add_p7ioc(hub, sp_iohubs);
io_add_common(hn, hub);
break;
- case CECHUB_HUB_P5IOC2:
- prlog(PR_INFO, "CEC: P5IOC2 !\n");
- hn = io_add_p5ioc2(hub, sp_iohubs);
- io_add_common(hn, hub);
- io_add_hea(hub, sp_iohubs);
- break;
case CECHUB_HUB_MURANO:
case CECHUB_HUB_MURANO_SEGU:
prlog(PR_INFO, "CEC: Murano !\n");
diff --git a/hdata/memory.c b/hdata/memory.c
index 58acd63..01cc61b 100644
--- a/hdata/memory.c
+++ b/hdata/memory.c
@@ -93,13 +93,13 @@ static void append_chip_id(struct dt_node *mem, u32 id)
{
struct dt_property *prop;
size_t len, i;
- u32 *p;
+ be32 *p;
prop = __dt_find_property(mem, "ibm,chip-id");
if (!prop)
return;
len = prop->len >> 2;
- p = (u32 *)prop->prop;
+	p = (be32 *)prop->prop;
/* Check if it exists already */
for (i = 0; i < len; i++) {
@@ -109,7 +109,7 @@ static void append_chip_id(struct dt_node *mem, u32 id)
/* Add it to the list */
dt_resize_property(&prop, (len + 1) << 2);
- p = (u32 *)prop->prop;
+ p = (be32 *)prop->prop;
p[len] = cpu_to_be32(id);
}
@@ -130,7 +130,8 @@ static bool add_address_range(struct dt_node *root,
prlog(PR_DEBUG, " Range: 0x%016llx..0x%016llx "
"on Chip 0x%x mattr: 0x%x\n",
- (long long)arange->start, (long long)arange->end,
+ (long long)be64_to_cpu(arange->start),
+ (long long)be64_to_cpu(arange->end),
chip_id, arange->mirror_attr);
/* reg contains start and length */
diff --git a/hdata/paca.c b/hdata/paca.c
index 6f5a1b4..145b825 100644
--- a/hdata/paca.c
+++ b/hdata/paca.c
@@ -112,7 +112,7 @@ static struct dt_node *find_cpu_by_hardware_proc_id(struct dt_node *root,
if (!prop)
return NULL;
- if (be32_to_cpu(*(u32 *)prop->prop) == hw_proc_id)
+ if (be32_to_cpu(*(be32 *)prop->prop) == hw_proc_id)
return i;
}
return NULL;
diff --git a/hdata/spira.c b/hdata/spira.c
index b35ca9c..d0b0ce8 100644
--- a/hdata/spira.c
+++ b/hdata/spira.c
@@ -754,7 +754,7 @@ static void add_iplparams_ipl_params(const void *iplp, struct dt_node *node)
* and the FSP expects the firmware to reset the PCI bus
* numbers and respond with a Power Down (CE,4D,02) message
*/
- if (p->other_attrib & IPLPARAMS_OATTR_RST_PCI_BUSNO)
+ if (be32_to_cpu(p->other_attrib) & IPLPARAMS_OATTR_RST_PCI_BUSNO)
dt_add_property_cells(node, "pci-busno-reset-ipl", 1);
dt_add_property_strings(node, "cec-ipl-side",
(p->ipl_side & IPLPARAMS_CEC_FW_IPL_SIDE_TEMP) ?
@@ -770,7 +770,7 @@ static void add_iplparams_ipl_params(const void *iplp, struct dt_node *node)
led_node = dt_find_by_path(opal_node, DT_PROPERTY_LED_NODE);
assert(led_node);
- if (p->other_attrib & IPLPARAMS_OATRR_LIGHT_PATH)
+ if (be32_to_cpu(p->other_attrib) & IPLPARAMS_OATRR_LIGHT_PATH)
dt_add_property_strings(led_node, DT_PROPERTY_LED_MODE,
LED_MODE_LIGHT_PATH);
else
diff --git a/hdata/spira.h b/hdata/spira.h
index 0916fe3..5ed9329 100644
--- a/hdata/spira.h
+++ b/hdata/spira.h
@@ -476,7 +476,6 @@ struct cechub_io_hub {
#define CECHUB_HUB_FAB_BR1_PDT_PHB4 0x08 /* p7ioc only */
#define CECHUB_HUB_FAB_BR1_PDT_PHB5 0x04 /* p7ioc only */
__be16 iohub_id; /* the type of hub */
-#define CECHUB_HUB_P5IOC2 0x1061 /* from VPL1 */
#define CECHUB_HUB_P7IOC 0x60e7 /* from VPL3 */
#define CECHUB_HUB_MURANO 0x20ef /* Murano from spec */
#define CECHUB_HUB_MURANO_SEGU 0x0001 /* Murano+Seguso from spec */
diff --git a/hw/Makefile.inc b/hw/Makefile.inc
index 034947c..a9dd9f1 100644
--- a/hw/Makefile.inc
+++ b/hw/Makefile.inc
@@ -4,9 +4,9 @@ SUBDIRS += hw
HW_OBJS = xscom.o chiptod.o gx.o cec.o lpc.o lpc-uart.o psi.o
HW_OBJS += homer.o slw.o occ.o fsi-master.o centaur.o
HW_OBJS += nx.o nx-rng.o nx-crypto.o nx-842.o
-HW_OBJS += p7ioc.o p7ioc-inits.o p7ioc-phb.o p5ioc2.o p5ioc2-phb.o
+HW_OBJS += p7ioc.o p7ioc-inits.o p7ioc-phb.o
HW_OBJS += phb3.o sfc-ctrl.o fake-rtc.o bt.o p8-i2c.o prd.o
-HW_OBJS += dts.o lpc-rtc.o
+HW_OBJS += dts.o lpc-rtc.o npu.o npu-hw-procedures.o
HW=hw/built-in.o
include $(SRC)/hw/fsp/Makefile.inc
diff --git a/hw/ast-bmc/ast-sf-ctrl.c b/hw/ast-bmc/ast-sf-ctrl.c
index 0ca32da..bf42d32 100644
--- a/hw/ast-bmc/ast-sf-ctrl.c
+++ b/hw/ast-bmc/ast-sf-ctrl.c
@@ -152,8 +152,11 @@ static int ast_sf_cmd_wr(struct spi_flash_ctrl *ctrl, uint8_t cmd,
static int ast_sf_set_4b(struct spi_flash_ctrl *ctrl, bool enable)
{
struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+ uint32_t ce_ctrl = 0;
- if (ct->type != AST_SF_TYPE_PNOR)
+ if (ct->type == AST_SF_TYPE_BMC && ct->ops.finfo->size > 0x1000000)
+ ce_ctrl = ast_ahb_readl(BMC_SPI_FCTL_CE_CTRL);
+ else if (ct->type != AST_SF_TYPE_PNOR)
return enable ? FLASH_ERR_4B_NOT_SUPPORTED : 0;
/*
@@ -164,15 +167,20 @@ static int ast_sf_set_4b(struct spi_flash_ctrl *ctrl, bool enable)
if (enable) {
ct->ctl_val |= 0x2000;
ct->ctl_read_val |= 0x2000;
+ ce_ctrl |= 0x1;
} else {
ct->ctl_val &= ~0x2000;
ct->ctl_read_val &= ~0x2000;
+ ce_ctrl &= ~0x1;
}
ct->mode_4b = enable;
/* Update read mode */
ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+ if (ce_ctrl && ct->type == AST_SF_TYPE_BMC)
+ ast_ahb_writel(ce_ctrl, BMC_SPI_FCTL_CE_CTRL);
+
return 0;
}
diff --git a/hw/bt.c b/hw/bt.c
index a53ff14..df4a4f0 100644
--- a/hw/bt.c
+++ b/hw/bt.c
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#define pr_fmt(fmt) "BT: " fmt
#include <skiboot.h>
#include <lpc.h>
#include <lock.h>
@@ -22,6 +23,7 @@
#include <ipmi.h>
#include <bt.h>
#include <timer.h>
+#include <ipmi.h>
/* BT registers */
#define BT_CTRL 0
@@ -39,6 +41,9 @@
#define BT_INTMASK_B2H_IRQ 0x02
#define BT_INTMASK_BMC_HWRST 0x80
+/* Maximum size of the HW FIFO */
+#define BT_FIFO_LEN 64
+
/* Default poll interval before interrupts are working */
#define BT_DEFAULT_POLL_MS 200
@@ -67,37 +72,57 @@
/*
* Maximum number of times to attempt sending a message before giving up.
*/
-#define BT_MAX_RETRY_COUNT 1
+#define BT_MAX_SEND_COUNT 2
#define BT_QUEUE_DEBUG 0
-#define BT_ERR(msg, fmt, args...) \
- do { prerror("BT seq 0x%02x netfn 0x%02x cmd 0x%02x: " fmt "\n", \
- (msg)->seq, (msg)->ipmi_msg.netfn, (msg)->ipmi_msg.cmd, ##args); \
+#define _BT_Q_LOG(level, msg, fmt, args...) \
+ do { if (msg) \
+ prlog(level, "seq 0x%02x netfn 0x%02x cmd 0x%02x: " fmt "\n", \
+ (msg)->seq, (msg)->ipmi_msg.netfn, (msg)->ipmi_msg.cmd, ##args); \
+ else \
+ prlog(level, "seq 0x?? netfn 0x?? cmd 0x??: " fmt "\n", ##args); \
} while(0)
-enum bt_states {
- BT_STATE_IDLE = 0,
- BT_STATE_RESP_WAIT,
-};
+
+/*
+ * takes a struct bt_msg *
+ */
+#define BT_Q_ERR(msg, fmt, args...) \
+ _BT_Q_LOG(PR_ERR, msg, fmt, ##args)
+
+#define BT_Q_DBG(msg, fmt, args...) \
+ _BT_Q_LOG(PR_DEBUG, msg, fmt, ##args)
+
+#define BT_Q_INF(msg, fmt, args...) \
+ _BT_Q_LOG(PR_INFO, msg, fmt, ##args)
struct bt_msg {
struct list_node link;
unsigned long tb;
uint8_t seq;
- uint8_t retry_count;
+ uint8_t send_count;
struct ipmi_msg ipmi_msg;
};
+struct bt_caps {
+ uint8_t num_requests;
+ uint16_t input_buf_len;
+ uint16_t output_buf_len;
+ uint8_t msg_timeout;
+ uint8_t num_retries;
+};
+
struct bt {
uint32_t base_addr;
- enum bt_states state;
struct lock lock;
struct list_head msgq;
struct timer poller;
bool irq_ok;
int queue_len;
+ struct bt_caps caps;
};
+
static struct bt bt;
static int ipmi_seq;
@@ -121,16 +146,79 @@ static inline void bt_set_h_busy(bool value)
bt_outb(BT_CTRL_H_BUSY, BT_CTRL);
}
-static inline bool bt_idle(void)
+static inline void bt_assert_h_busy(void)
{
- uint8_t bt_ctrl = bt_inb(BT_CTRL);
+ uint8_t rval;
+ rval = bt_inb(BT_CTRL);
+ assert(rval & BT_CTRL_H_BUSY);
+}
- return !(bt_ctrl & BT_CTRL_B_BUSY) && !(bt_ctrl & BT_CTRL_H2B_ATN);
+static void get_bt_caps_complete(struct ipmi_msg *msg)
+{
+ /* Ignore errors, we'll fall back to using the defaults, no big deal */
+
+ if (msg->data[0] == 0) {
+ prlog(PR_DEBUG, "Got illegal BMC BT capability\n");
+ goto out;
+ }
+
+ if (msg->data[1] + 1 != BT_FIFO_LEN) {
+ prlog(PR_DEBUG, "Got a input buffer len (%u) cap which differs from the default\n",
+ msg->data[1]);
+ goto out;
+ }
+
+ if (msg->data[2] + 1 != BT_FIFO_LEN) {
+ prlog(PR_DEBUG, "Got a output buffer len (%u) cap which differs from the default\n",
+ msg->data[2]);
+ goto out;
+ }
+
+ /*
+ * IPMI Spec says that the value for buffer sizes are:
+ * "the largest value allowed in first byte"
+ * Therefore we want to add one to what we get
+ */
+ bt.caps.num_requests = msg->data[0];
+ bt.caps.input_buf_len = msg->data[1] + 1;
+ bt.caps.output_buf_len = msg->data[2] + 1;
+ bt.caps.msg_timeout = msg->data[3];
+ bt.caps.num_retries = msg->data[4];
+ prlog(PR_DEBUG, "BMC BT capabilities received:\n");
+ prlog(PR_DEBUG, "buffer sizes: %d input %d output\n",
+ bt.caps.input_buf_len, bt.caps.output_buf_len);
+ prlog(PR_DEBUG, "number of requests: %d\n", bt.caps.num_requests);
+ prlog(PR_DEBUG, "msg timeout: %d max retries: %d\n",
+ bt.caps.msg_timeout, bt.caps.num_retries);
+
+out:
+ ipmi_free_msg(msg);
+}
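A worked example of the decode above, using hypothetical response bytes, to show the "+1" rule applied to the two buffer-size fields:

/* Illustrative Get BT Interface Capabilities response:
 *   data[] = { 0x01, 0x3f, 0x3f, 0x0a, 0x01 }
 * decodes as:
 *   num_requests   = 0x01
 *   input_buf_len  = 0x3f + 1 = 64  (== BT_FIFO_LEN, so it is accepted)
 *   output_buf_len = 0x3f + 1 = 64
 *   msg_timeout    = 0x0a
 *   num_retries    = 0x01
 */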
+
+static void get_bt_caps(void)
+{
+
+ struct ipmi_msg *bmc_caps;
+ /*
+ * We haven't sent a message yet, so now is a good time to ask the BMC for its
+ * capabilities.
+ */
+ bmc_caps = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_GET_BT_CAPS,
+ get_bt_caps_complete, NULL, NULL, 0, sizeof(struct bt_caps));
+ if (!bmc_caps)
+ prerror("Couldn't create BMC BT capabilities msg\n");
+
+ if (bmc_caps && ipmi_queue_msg(bmc_caps))
+ prerror("Couldn't enqueue request for BMC BT capabilities\n");
+
+ /* Ignore errors, we'll fall back to using the defaults, no big deal */
}
-static inline void bt_set_state(enum bt_states next_state)
+static inline bool bt_idle(void)
{
- bt.state = next_state;
+ uint8_t bt_ctrl = bt_inb(BT_CTRL);
+
+ return !(bt_ctrl & BT_CTRL_B_BUSY) && !(bt_ctrl & BT_CTRL_H2B_ATN);
}
/* Must be called with bt.lock held */
@@ -152,8 +240,6 @@ static void bt_init_interface(void)
/* Take care of a stable H_BUSY if any */
bt_set_h_busy(false);
-
- bt_set_state(BT_STATE_IDLE);
}
static void bt_reset_interface(void)
@@ -165,15 +251,11 @@ static void bt_reset_interface(void)
/* Try and send a message from the message queue. Caller must hold
* bt.bt_lock and bt.lock and ensure the message queue is not
* empty. */
-static void bt_send_msg(void)
+static void bt_send_msg(struct bt_msg *bt_msg)
{
int i;
- struct bt_msg *bt_msg;
struct ipmi_msg *ipmi_msg;
- bt_msg = list_top(&bt.msgq, struct bt_msg, link);
- assert(bt_msg);
-
ipmi_msg = &bt_msg->ipmi_msg;
/* Send the message */
@@ -195,16 +277,29 @@ static void bt_send_msg(void)
for (i = 0; i < ipmi_msg->req_size; i++)
bt_outb(ipmi_msg->data[i], BT_HOST2BMC);
- bt_msg->tb = mftb();
+ BT_Q_DBG(bt_msg, "Message sent to host");
+ bt_msg->send_count++;
+
bt_outb(BT_CTRL_H2B_ATN, BT_CTRL);
- bt_set_state(BT_STATE_RESP_WAIT);
return;
}
+static void bt_clear_fifo(void)
+{
+ int i;
+
+ for (i = 0; i < bt.caps.input_buf_len; i++)
+ bt_outb(0xff, BT_HOST2BMC);
+}
+
static void bt_flush_msg(void)
{
- bt_outb(BT_CTRL_B2H_ATN | BT_CTRL_CLR_RD_PTR, BT_CTRL);
+ bt_assert_h_busy();
+ bt_outb(BT_CTRL_B2H_ATN | BT_CTRL_CLR_RD_PTR | BT_CTRL_CLR_WR_PTR, BT_CTRL);
+ bt_clear_fifo();
+ /* Can't hurt to clear the write pointer again, just to be sure */
+ bt_outb(BT_CTRL_CLR_WR_PTR, BT_CTRL);
bt_set_h_busy(false);
}
@@ -249,9 +344,9 @@ static void bt_get_resp(void)
}
if (!bt_msg) {
/* A response to a message we no longer care about. */
- prlog(PR_INFO, "BT: Nobody cared about a response to an BT/IPMI message\n");
+ prlog(PR_INFO, "Nobody cared about a response to an BT/IPMI message"
+ "(seq 0x%02x netfn 0x%02x cmd 0x%02x)\n", seq, netfn, cmd);
bt_flush_msg();
- bt_set_state(BT_STATE_IDLE);
return;
}
@@ -263,7 +358,7 @@ static void bt_get_resp(void)
* bt_inb(BT_HOST2BMC) < BT_MIN_RESP_LEN (which should never occur).
*/
if (resp_len > ipmi_msg->resp_size) {
- BT_ERR(bt_msg, "Invalid resp_len %d", resp_len);
+ BT_Q_ERR(bt_msg, "Invalid resp_len %d", resp_len);
resp_len = ipmi_msg->resp_size;
cc = IPMI_ERR_MSG_TRUNCATED;
}
@@ -274,7 +369,7 @@ static void bt_get_resp(void)
ipmi_msg->data[i] = bt_inb(BT_HOST2BMC);
bt_set_h_busy(false);
- bt_set_state(BT_STATE_IDLE);
+ BT_Q_DBG(bt_msg, "IPMI MSG done");
list_del(&bt_msg->link);
bt.queue_len--;
@@ -283,10 +378,6 @@ static void bt_get_resp(void)
/*
* Call the IPMI layer to finish processing the message.
*/
-#if BT_QUEUE_DEBUG
- prlog(PR_DEBUG, "cmd 0x%02x done\n", seq);
-#endif
-
ipmi_cmd_done(cmd, netfn, cc, ipmi_msg);
lock(&bt.lock);
@@ -299,18 +390,19 @@ static void bt_expire_old_msg(uint64_t tb)
bt_msg = list_top(&bt.msgq, struct bt_msg, link);
- if (bt_msg && bt_msg->tb > 0 && (bt_msg->tb + BT_MSG_TIMEOUT) < tb) {
- if (bt_msg->retry_count < BT_MAX_RETRY_COUNT) {
+ if (bt_msg && bt_msg->tb > 0 && (bt_msg->tb + bt.caps.msg_timeout) < tb) {
+ if (bt_msg->send_count < BT_MAX_SEND_COUNT) {
/* A message timeout is usually due to the BMC
clearing the H2B_ATN flag without actually
doing anything. The data will still be in the
FIFO so just reset the flag.*/
- BT_ERR(bt_msg, "Retry sending message");
- bt_msg->retry_count++;
+ BT_Q_ERR(bt_msg, "Retry sending message");
+ bt_msg->send_count++;
+
bt_msg->tb = tb;
bt_outb(BT_CTRL_H2B_ATN, BT_CTRL);
} else {
- BT_ERR(bt_msg, "Timeout sending message");
+ BT_Q_ERR(bt_msg, "Timeout sending message");
bt_msg_del(bt_msg);
/* Timing out a message is inherently racy as the BMC
@@ -332,7 +424,7 @@ static void print_debug_queue_info(void)
printed = false;
prlog(PR_DEBUG, "-------- BT Msg Queue --------\n");
list_for_each(&bt.msgq, msg, link) {
- prlog(PR_DEBUG, "Seq: 0x%02x Cmd: 0x%02x\n", msg->seq, msg->ipmi_msg.cmd);
+ BT_Q_DBG(msg, "[ sent %d ]", msg->send_count);
}
prlog(PR_DEBUG, "-----------------------------\n");
} else if (!printed) {
@@ -346,12 +438,29 @@ static void print_debug_queue_info(void) {}
static void bt_send_and_unlock(void)
{
- if (lpc_ok() && bt_idle() && !list_empty(&bt.msgq)
- && bt.state == BT_STATE_IDLE)
- bt_send_msg();
+ if (lpc_ok() && !list_empty(&bt.msgq)) {
+ struct bt_msg *bt_msg;
+
+ bt_msg = list_top(&bt.msgq, struct bt_msg, link);
+ assert(bt_msg);
+
+ /* Start the message timeout once it gets to the top
+ * of the queue. This will ensure we time out messages
+ * when the BT interface is broken, for example when the
+ * BMC is not responding to any IPMI messages. */
+ if (bt_msg->tb == 0)
+ bt_msg->tb = mftb();
+
+ /*
+ * Only send it if we haven't already.
+ * Timeouts and retries happen in bt_expire_old_msg()
+ * called from bt_poll()
+ */
+ if (bt_idle() && bt_msg->send_count == 0)
+ bt_send_msg(bt_msg);
+ }
unlock(&bt.lock);
- return;
}
static void bt_poll(struct timer *t __unused, void *data __unused,
@@ -372,8 +481,7 @@ static void bt_poll(struct timer *t __unused, void *data __unused,
bt_ctrl = bt_inb(BT_CTRL);
/* Is there a response waiting for us? */
- if (bt.state == BT_STATE_RESP_WAIT &&
- (bt_ctrl & BT_CTRL_B2H_ATN))
+ if (bt_ctrl & BT_CTRL_B2H_ATN)
bt_get_resp();
bt_expire_old_msg(now);
@@ -401,15 +509,15 @@ static void bt_add_msg(struct bt_msg *bt_msg)
{
bt_msg->tb = 0;
bt_msg->seq = ipmi_seq++;
- bt_msg->retry_count = 0;
+ bt_msg->send_count = 0;
bt.queue_len++;
if (bt.queue_len > BT_MAX_QUEUE_LEN) {
/* Maximum queue length exceeded - remove the oldest message
from the queue. */
- BT_ERR(bt_msg, "Maximum queue length exceeded");
+ BT_Q_ERR(bt_msg, "Maximum queue length exceeded");
bt_msg = list_tail(&bt.msgq, struct bt_msg, link);
assert(bt_msg);
- BT_ERR(bt_msg, "Removed from queue");
+ BT_Q_ERR(bt_msg, "Removed from queue");
bt_msg_del(bt_msg);
}
}
@@ -515,6 +623,13 @@ void bt_init(void)
const struct dt_property *prop;
uint32_t irq;
+ /* Set sane capability defaults */
+ bt.caps.num_requests = 1;
+ bt.caps.input_buf_len = BT_FIFO_LEN;
+ bt.caps.output_buf_len = BT_FIFO_LEN;
+ bt.caps.msg_timeout = BT_MSG_TIMEOUT;
+ bt.caps.num_retries = 1;
+
/* We support only one */
n = dt_find_compatible_node(dt_root, NULL, "ipmi-bt");
if (!n)
@@ -523,11 +638,11 @@ void bt_init(void)
/* Get IO base */
prop = dt_find_property(n, "reg");
if (!prop) {
- prerror("BT: Can't find reg property\n");
+ prerror("Can't find reg property\n");
return;
}
if (dt_property_get_cell(prop, 0) != OPAL_LPC_IO) {
- prerror("BT: Only supports IO addresses\n");
+ prerror("Only supports IO addresses\n");
return;
}
bt.base_addr = dt_property_get_cell(prop, 1);
@@ -540,11 +655,10 @@ void bt_init(void)
* The iBT interface comes up in the busy state until the daemon has
* initialised it.
*/
- bt_set_state(BT_STATE_IDLE);
list_head_init(&bt.msgq);
bt.queue_len = 0;
- printf("BT: Interface initialized, IO 0x%04x\n", bt.base_addr);
+ prlog(PR_NOTICE, "Interface initialized, IO 0x%04x\n", bt.base_addr);
ipmi_register_backend(&bt_backend);
@@ -557,5 +671,9 @@ void bt_init(void)
irq = dt_prop_get_u32(n, "interrupts");
bt_lpc_client.interrupts = LPC_IRQ(irq);
lpc_register_client(dt_get_chip_id(n), &bt_lpc_client);
- prlog(PR_DEBUG, "BT: Using LPC IRQ %d\n", irq);
+
+ /* Enqueue an IPMI message to ask the BMC about its BT capabilities */
+ get_bt_caps();
+
+ prlog(PR_DEBUG, "Using LPC IRQ %d\n", irq);
}
diff --git a/hw/cec.c b/hw/cec.c
index 6c0fea8..1743f4d 100644
--- a/hw/cec.c
+++ b/hw/cec.c
@@ -17,7 +17,6 @@
#include <skiboot.h>
#include <cec.h>
#include <p7ioc.h>
-#include <p5ioc2.h>
#include <interrupts.h>
#include <opal-api.h>
@@ -37,6 +36,7 @@ struct io_hub *cec_get_hub_by_id(uint32_t hub_id)
void cec_register(struct io_hub *hub)
{
+ assert(hub->hub_id < MAX_IO_HUBS);
cec_iohubs[hub->hub_id] = hub;
}
@@ -52,19 +52,17 @@ void cec_reset(void)
}
}
+/* This was only supported by p5ioc2, which was dropped */
static int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id,
- uint64_t tce_mem_addr,
- uint64_t tce_mem_size)
+ uint64_t tce_mem_addr __unused,
+ uint64_t tce_mem_size __unused)
{
struct io_hub *hub = cec_get_hub_by_id(hub_id);
if (!hub)
return OPAL_PARAMETER;
- if (!hub->ops->set_tce_mem)
- return OPAL_UNSUPPORTED;
-
- return hub->ops->set_tce_mem(hub, tce_mem_addr, tce_mem_size);
+ return OPAL_UNSUPPORTED;
}
opal_call(OPAL_PCI_SET_HUB_TCE_MEMORY, opal_pci_set_hub_tce_memory, 3);
diff --git a/hw/chiptod.c b/hw/chiptod.c
index 88f6c8e..f7427f1 100644
--- a/hw/chiptod.c
+++ b/hw/chiptod.c
@@ -14,9 +14,10 @@
* limitations under the License.
*/
-/*
- * Handle ChipTOD chip & configure core and CAPP timebases
- */
+/* Handle ChipTOD chip & configure core and CAPP timebases */
+
+#define pr_fmt(fmt) "CHIPTOD: " fmt
+
#include <skiboot.h>
#include <chiptod.h>
#include <chip.h>
@@ -235,8 +236,8 @@ static void _chiptod_cache_tod_regs(int32_t chip_id)
for (i = 0; i < ARRAY_SIZE(chiptod_tod_regs); i++) {
if (xscom_read(chip_id, chiptod_tod_regs[i].xscom_addr,
- &(chiptod_tod_regs[i].val[chip_id].data)) != 0) {
- prerror("CHIPTOD: XSCOM error reading 0x%08llx reg.\n",
+ &(chiptod_tod_regs[i].val[chip_id].data))) {
+ prerror("XSCOM error reading 0x%08llx reg.\n",
chiptod_tod_regs[i].xscom_addr);
/* Invalidate this record and continue */
chiptod_tod_regs[i].val[chip_id].valid = 0;
@@ -260,7 +261,7 @@ static void print_topo_info(enum chiptod_topology topo)
const char *status[] = { "Unknown",
"Active Master", "Backup Master", "Backup Master Disabled" };
- prlog(PR_DEBUG, "CHIPTOD: chip id: %d, Role: %s, Status: %s\n",
+ prlog(PR_DEBUG, " Chip id: %d, Role: %s, Status: %s\n",
chiptod_topology_info[topo].id,
role[chiptod_topology_info[topo].role + 1],
status[chiptod_topology_info[topo].status + 1]);
@@ -273,11 +274,11 @@ static void print_topology_info(void)
if (current_topology < 0)
return;
- prlog(PR_DEBUG, "CHIPTOD: TOD Topology in Use: %s\n",
+ prlog(PR_DEBUG, "TOD Topology in Use: %s\n",
topo[current_topology+1]);
- prlog(PR_DEBUG, "CHIPTOD: Primary configuration:\n");
+ prlog(PR_DEBUG, " Primary configuration:\n");
print_topo_info(chiptod_topo_primary);
- prlog(PR_DEBUG, "CHIPTOD: Secondary configuration:\n");
+ prlog(PR_DEBUG, " Secondary configuration:\n");
print_topo_info(chiptod_topo_secondary);
}
@@ -285,8 +286,8 @@ static enum chiptod_topology query_current_topology(void)
{
uint64_t tod_status;
- if (xscom_readme(TOD_STATUS, &tod_status) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_STATUS reg\n");
+ if (xscom_readme(TOD_STATUS, &tod_status)) {
+ prerror("XSCOM error reading TOD_STATUS reg\n");
return chiptod_topo_unknown;
}
@@ -310,8 +311,8 @@ chiptod_get_chip_role(enum chiptod_topology topology, int32_t chip_id)
if (chip_id < 0)
return role;
- if (xscom_read(chip_id, TOD_PSMS_CTRL, &tod_ctrl) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_PSMS_CTRL\n");
+ if (xscom_read(chip_id, TOD_PSMS_CTRL, &tod_ctrl)) {
+ prerror("XSCOM error reading TOD_PSMS_CTRL\n");
return chiptod_chip_role_UNKNOWN;
}
@@ -367,8 +368,8 @@ static bool chiptod_sync_step_check_running(enum chiptod_topology topology)
if (chip_id < 0)
return false;
- if (xscom_read(chip_id, TOD_STATUS, &tod_status) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_STATUS reg\n");
+ if (xscom_read(chip_id, TOD_STATUS, &tod_status)) {
+ prerror("XSCOM error reading TOD_STATUS reg\n");
return false;
}
@@ -450,8 +451,8 @@ static enum chiptod_chip_status _chiptod_get_chip_status(int32_t chip_id)
uint64_t tod_status;
enum chiptod_chip_status status = -1;
- if (xscom_read(chip_id, TOD_STATUS, &tod_status) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_STATUS reg\n");
+ if (xscom_read(chip_id, TOD_STATUS, &tod_status)) {
+ prerror("XSCOM error reading TOD_STATUS reg\n");
return status;
}
@@ -509,7 +510,7 @@ static void chiptod_setup_base_tfmr(void)
* The max jitter factor is set to 240 based on what pHyp uses.
*/
mcbs = (core_freq * 240) / (4 * tod_freq) / 100;
- prlog(PR_INFO, "CHIPTOD: Calculated MCBS is 0x%llx"
+ prlog(PR_INFO, "Calculated MCBS is 0x%llx"
" (Cfreq=%lld Tfreq=%lld)\n",
mcbs, core_freq, tod_freq);
@@ -528,16 +529,16 @@ static bool chiptod_mod_tb(void)
mtspr(SPR_TFMR, tfmr | SPR_TFMR_LOAD_TOD_MOD);
do {
if (++timeout >= (TIMEOUT_LOOPS*2)) {
- prerror("CHIPTOD: TB \"Not Set\" timeout\n");
+ prerror("TB \"Not Set\" timeout\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: TB \"Not Set\" TFMR corrupt\n");
+ prerror("TB \"Not Set\" TFMR corrupt\n");
return false;
}
if (GETFIELD(SPR_TFMR_TBST_ENCODED, tfmr) == 9) {
- prerror("CHIPTOD: TB \"Not Set\" TOD in error state\n");
+ prerror("TB \"Not Set\" TOD in error state\n");
return false;
}
} while(tfmr & SPR_TFMR_LOAD_TOD_MOD);
@@ -547,17 +548,17 @@ static bool chiptod_mod_tb(void)
static bool chiptod_interrupt_check(void)
{
- uint64_t tfmr = mfspr(SPR_TFMR);
+ uint64_t tfmr;
uint64_t timeout = 0;
do {
if (++timeout >= TIMEOUT_LOOPS) {
- prerror("CHIPTOD: Interrupt check fail\n");
+ prerror("Interrupt check fail\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: Interrupt check TFMR corrupt !\n");
+ prerror("Interrupt check TFMR corrupt !\n");
return false;
}
} while(tfmr & SPR_TFMR_CHIP_TOD_INTERRUPT);
@@ -569,8 +570,8 @@ static bool chiptod_running_check(uint32_t chip_id)
{
uint64_t tval;
- if (xscom_read(chip_id, TOD_CHIPTOD_FSM, &tval) != 0) {
- prerror("CHIPTOD: XSCOM error polling run\n");
+ if (xscom_read(chip_id, TOD_CHIPTOD_FSM, &tval)) {
+ prerror("XSCOM error polling run\n");
return false;
}
if (tval & 0x0800000000000000UL)
@@ -587,11 +588,11 @@ static bool chiptod_poll_running(void)
/* Chip TOD running check */
do {
if (++timeout >= TIMEOUT_LOOPS) {
- prerror("CHIPTOD: Running check fail timeout\n");
+ prerror("Running check fail timeout\n");
return false;
}
- if (xscom_readme(TOD_CHIPTOD_FSM, &tval) != 0) {
- prerror("CHIPTOD: XSCOM error polling run\n");
+ if (xscom_readme(TOD_CHIPTOD_FSM, &tval)) {
+ prerror("XSCOM error polling run\n");
return false;
}
} while(!(tval & 0x0800000000000000UL));
@@ -614,8 +615,8 @@ static bool chiptod_to_tb(void)
* p8: 0b0001 || 4-bit core id
*/
- if (xscom_readme(TOD_PIB_MASTER, &tval) != 0) {
- prerror("CHIPTOD: XSCOM error reading PIB_MASTER\n");
+ if (xscom_readme(TOD_PIB_MASTER, &tval)) {
+ prerror("XSCOM error reading PIB_MASTER\n");
return false;
}
if (chiptod_type == chiptod_p8) {
@@ -627,8 +628,8 @@ static bool chiptod_to_tb(void)
}
tval &= ~TOD_PIBM_ADDR_CFG_MCAST;
tval = SETFIELD(TOD_PIBM_ADDR_CFG_SLADDR, tval, tvbits);
- if (xscom_writeme(TOD_PIB_MASTER, tval) != 0) {
- prerror("CHIPTOD: XSCOM error writing PIB_MASTER\n");
+ if (xscom_writeme(TOD_PIB_MASTER, tval)) {
+ prerror("XSCOM error writing PIB_MASTER\n");
return false;
}
@@ -636,8 +637,8 @@ static bool chiptod_to_tb(void)
mtspr(SPR_TFMR, base_tfmr | SPR_TFMR_MOVE_CHIP_TOD_TO_TB);
/* Tell the ChipTOD to send it */
- if (xscom_writeme(TOD_CHIPTOD_TO_TB, (1ULL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error writing CHIPTOD_TO_TB\n");
+ if (xscom_writeme(TOD_CHIPTOD_TO_TB, PPC_BIT(0))) {
+ prerror("XSCOM error writing CHIPTOD_TO_TB\n");
return false;
}
@@ -645,12 +646,12 @@ static bool chiptod_to_tb(void)
timeout = 0;
do {
if (++timeout >= TIMEOUT_LOOPS) {
- prerror("CHIPTOD: Chip to TB timeout\n");
+ prerror("Chip to TB timeout\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: MoveToTB: corrupt TFMR !\n");
+ prerror("MoveToTB: corrupt TFMR !\n");
return false;
}
} while(tfmr & SPR_TFMR_MOVE_CHIP_TOD_TO_TB);
@@ -714,12 +715,12 @@ static bool chiptod_reset_tb_errors(void)
/* Don't actually do anything on error for
* now ... not much we can do, panic maybe ?
*/
- prerror("CHIPTOD: TB error reset timeout !\n");
+ prerror("TB error reset timeout !\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: TB error reset: corrupt TFMR !\n");
+ prerror("TB error reset: corrupt TFMR !\n");
return false;
}
} while(tfmr & SPR_TFMR_CLEAR_TB_ERRORS);
@@ -747,7 +748,7 @@ static void chiptod_reset_tod_errors(void)
* At boot, we clear the errors that the firmware is
* supposed to handle. List provided by the pHyp folks.
*/
-
+
terr = TOD_ERR_CRITC_PARITY;
terr |= TOD_ERR_PSS_HAMMING_DISTANCE;
terr |= TOD_ERR_DELAY_COMPL_PARITY;
@@ -756,8 +757,8 @@ static void chiptod_reset_tod_errors(void)
terr |= TOD_ERR_TOD_FSM_PARITY;
terr |= TOD_ERR_TOD_REGISTER_PARITY;
- if (xscom_writeme(TOD_ERROR, terr) != 0) {
- prerror("CHIPTOD: XSCOM error writing TOD_ERROR !\n");
+ if (xscom_writeme(TOD_ERROR, terr)) {
+ prerror("XSCOM error writing TOD_ERROR !\n");
/* Not much we can do here ... abort ? */
}
}
@@ -766,7 +767,7 @@ static void chiptod_sync_master(void *data)
{
bool *result = data;
- prlog(PR_DEBUG, "CHIPTOD: Master sync on CPU PIR 0x%04x...\n",
+ prlog(PR_DEBUG, "Master sync on CPU PIR 0x%04x...\n",
this_cpu()->pir);
/* Apply base tfmr */
@@ -789,8 +790,8 @@ static void chiptod_sync_master(void *data)
prlog(PR_INSANE, "SYNC MASTER Step 2 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
/* Chip TOD step checkers enable */
- if (xscom_writeme(TOD_TTYPE_2, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error enabling steppers\n");
+ if (xscom_writeme(TOD_TTYPE_2, PPC_BIT(0))) {
+ prerror("XSCOM error enabling steppers\n");
goto error;
}
@@ -798,24 +799,24 @@ static void chiptod_sync_master(void *data)
/* Chip TOD interrupt check */
if (!chiptod_interrupt_check())
- goto error;
+ goto error;
prlog(PR_INSANE, "SYNC MASTER Step 4 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
/* Switch local chiptod to "Not Set" state */
- if (xscom_writeme(TOD_LOAD_TOD_MOD, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error sending LOAD_TOD_MOD\n");
+ if (xscom_writeme(TOD_LOAD_TOD_MOD, PPC_BIT(0))) {
+ prerror("XSCOM error sending LOAD_TOD_MOD\n");
goto error;
}
/* Switch all remote chiptod to "Not Set" state */
- if (xscom_writeme(TOD_TTYPE_5, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error sending TTYPE_5\n");
+ if (xscom_writeme(TOD_TTYPE_5, PPC_BIT(0))) {
+ prerror("XSCOM error sending TTYPE_5\n");
goto error;
}
/* Chip TOD load initial value */
- if (xscom_writeme(TOD_CHIPTOD_LOAD_TB, INIT_TB) != 0) {
- prerror("CHIPTOD: XSCOM error setting init TB\n");
+ if (xscom_writeme(TOD_CHIPTOD_LOAD_TB, INIT_TB)) {
+ prerror("XSCOM error setting init TB\n");
goto error;
}
@@ -831,8 +832,8 @@ static void chiptod_sync_master(void *data)
prlog(PR_INSANE, "SYNC MASTER Step 7 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
/* Send local chip TOD to all chips TOD */
- if (xscom_writeme(TOD_TTYPE_4, (1ULL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error sending TTYPE_4\n");
+ if (xscom_writeme(TOD_TTYPE_4, PPC_BIT(0))) {
+ prerror("XSCOM error sending TTYPE_4\n");
goto error;
}
@@ -854,8 +855,7 @@ static void chiptod_sync_master(void *data)
*result = true;
return;
error:
- prerror("CHIPTOD: Master sync failed! TFMR=0x%016lx\n",
- mfspr(SPR_TFMR));
+ prerror("Master sync failed! TFMR=0x%016lx\n", mfspr(SPR_TFMR));
*result = false;
}
@@ -871,7 +871,7 @@ static void chiptod_sync_slave(void *data)
return;
}
- prlog(PR_DEBUG, "CHIPTOD: Slave sync on CPU PIR 0x%04x...\n",
+ prlog(PR_DEBUG, "Slave sync on CPU PIR 0x%04x...\n",
this_cpu()->pir);
/* Apply base tfmr */
@@ -914,8 +914,7 @@ static void chiptod_sync_slave(void *data)
*result = true;
return;
error:
- prerror("CHIPTOD: Slave sync failed ! TFMR=0x%016lx\n",
- mfspr(SPR_TFMR));
+ prerror("Slave sync failed ! TFMR=0x%016lx\n", mfspr(SPR_TFMR));
*result = false;
}
@@ -949,7 +948,7 @@ bool chiptod_wakeup_resync(void)
return true;
error:
- prerror("CHIPTOD: Resync failed ! TFMR=0x%16lx\n", mfspr(SPR_TFMR));
+ prerror("Resync failed ! TFMR=0x%16lx\n", mfspr(SPR_TFMR));
unlock(&chiptod_lock);
return false;
}
@@ -962,8 +961,8 @@ static int chiptod_recover_tod_errors(void)
int32_t chip_id = this_cpu()->chip_id;
/* Read TOD error register */
- if (xscom_readme(TOD_ERROR, &terr) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_ERROR reg\n");
+ if (xscom_readme(TOD_ERROR, &terr)) {
+ prerror("XSCOM error reading TOD_ERROR reg\n");
return 0;
}
/* Check for sync check error and recover */
@@ -986,25 +985,24 @@ static int chiptod_recover_tod_errors(void)
/* Check if we have valid last saved register value. */
if (!chiptod_tod_regs[i].val[chip_id].valid) {
- prerror("CHIPTOD: Failed to restore TOD register: "
- "%08llx", chiptod_tod_regs[i].xscom_addr);
+ prerror("Failed to restore TOD register: %08llx",
+ chiptod_tod_regs[i].xscom_addr);
return 0;
}
- prlog(PR_DEBUG, "CHIPTOD: parity error, "
- "Restoring TOD register: %08llx\n",
- chiptod_tod_regs[i].xscom_addr);
+ prlog(PR_DEBUG, "Parity error, Restoring TOD register: "
+ "%08llx\n", chiptod_tod_regs[i].xscom_addr);
if (xscom_writeme(chiptod_tod_regs[i].xscom_addr,
- chiptod_tod_regs[i].val[chip_id].data) != 0) {
- prerror("CHIPTOD: XSCOM error writing 0x%08llx reg.\n",
+ chiptod_tod_regs[i].val[chip_id].data)) {
+ prerror("XSCOM error writing 0x%08llx reg.\n",
chiptod_tod_regs[i].xscom_addr);
return 0;
}
treset |= chiptod_tod_regs[i].error_bit;
}
- if (treset && (xscom_writeme(TOD_ERROR, treset) != 0)) {
- prerror("CHIPTOD: XSCOM error writing TOD_ERROR !\n");
+ if (treset && (xscom_writeme(TOD_ERROR, treset))) {
+ prerror("XSCOM error writing TOD_ERROR !\n");
return 0;
}
/* We have handled all the TOD errors routed to hypervisor */
@@ -1043,8 +1041,8 @@ static bool chiptod_set_ttype4_mode(struct proc_chip *chip, bool enable)
if (!chip)
return false;
- if (xscom_read(chip->id, TOD_PIB_MASTER, &tval) != 0) {
- prerror("CHIPTOD: XSCOM error reading PIB_MASTER\n");
+ if (xscom_read(chip->id, TOD_PIB_MASTER, &tval)) {
+ prerror("XSCOM error reading PIB_MASTER\n");
return false;
}
@@ -1061,8 +1059,8 @@ static bool chiptod_set_ttype4_mode(struct proc_chip *chip, bool enable)
tval &= ~TOD_PIBM_TTYPE4_SEND_ENBL;
}
- if (xscom_write(chip->id, TOD_PIB_MASTER, tval) != 0) {
- prerror("CHIPTOD: XSCOM error writing PIB_MASTER\n");
+ if (xscom_write(chip->id, TOD_PIB_MASTER, tval)) {
+ prerror("XSCOM error writing PIB_MASTER\n");
return false;
}
return true;
@@ -1096,12 +1094,12 @@ static void chiptod_stop_slave_tods(void)
if (role == chiptod_chip_role_MDMT)
continue;
- if (xscom_write(chip->id, TOD_ERROR_INJECT, terr) != 0)
- prerror("CHIPTOD: XSCOM error writing TOD_ERROR_INJ\n");
+ if (xscom_write(chip->id, TOD_ERROR_INJECT, terr))
+ prerror("XSCOM error writing TOD_ERROR_INJ\n");
if (chiptod_running_check(chip->id)) {
prlog(PR_DEBUG,
- "CHIPTOD: Failed to stop TOD on slave CHIP [%d]\n",
+ "Failed to stop TOD on slave CHIP [%d]\n",
chip->id);
}
}
@@ -1125,7 +1123,7 @@ static bool is_topology_switch_required(void)
* then we need switch topology to recover from TOD error.
*/
if (!chiptod_sync_step_check_running(current_topology)) {
- prlog(PR_DEBUG, "CHIPTOD: Sync/Step network not running\n");
+ prlog(PR_DEBUG, "Sync/Step network not running\n");
return true;
}
@@ -1133,8 +1131,8 @@ static bool is_topology_switch_required(void)
* Check if there is a step check error reported on
* Active master.
*/
- if (xscom_read(active_master_chip, TOD_ERROR, &tod_error) != 0) {
- prerror("CHIPTOD: XSCOM error reading TOD_ERROR reg\n");
+ if (xscom_read(active_master_chip, TOD_ERROR, &tod_error)) {
+ prerror("XSCOM error reading TOD_ERROR reg\n");
/*
* Can't do anything here. But we already found that
* sync/step network is running. Hence return false.
@@ -1143,7 +1141,7 @@ static bool is_topology_switch_required(void)
}
if (tod_error & TOD_ERR_MP0_STEP_CHECK) {
- prlog(PR_DEBUG, "CHIPTOD: TOD step check error\n");
+ prlog(PR_DEBUG, "TOD step check error\n");
return true;
}
@@ -1186,15 +1184,15 @@ static void chiptod_topology_switch_complete(void)
* This isn't documented anywhere. This info is provided by FSP
* folks.
*/
- if (xscom_writeme(LOCAL_CORE_FIR, LFIR_SWITCH_COMPLETE) != 0) {
- prerror("CHIPTOD: XSCOM error writing LOCAL_CORE_FIR\n");
+ if (xscom_writeme(LOCAL_CORE_FIR, LFIR_SWITCH_COMPLETE)) {
+ prerror("XSCOM error writing LOCAL_CORE_FIR\n");
return;
}
/* Save TOD control registers values. */
chiptod_cache_tod_registers();
- prlog(PR_DEBUG, "CHIPTOD: Topology switch complete\n");
+ prlog(PR_DEBUG, "Topology switch complete\n");
print_topology_info();
}
@@ -1212,7 +1210,7 @@ static int chiptod_start_tod(void)
if (is_topology_switch_required()) {
int32_t mchip = chiptod_get_active_master();
- prlog(PR_DEBUG, "CHIPTOD: Need topology switch to recover\n");
+ prlog(PR_DEBUG, "Need topology switch to recover\n");
/*
* There is a failure in StepSync network in current
* active topology. TOD is not running on active master chip.
@@ -1227,15 +1225,15 @@ static int chiptod_start_tod(void)
* is valid and stop all slave TODs in backup topology.
*/
if (!chiptod_backup_valid()) {
- prerror("CHIPTOD: Backup master is not enabled.\n");
- prerror("CHIPTOD: Can not do a topology switch.\n");
+ prerror("Backup master is not enabled. "
+ "Can not do a topology switch.\n");
return 0;
}
chiptod_stop_slave_tods();
- if (xscom_write(mchip, TOD_TTYPE_1, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error switching primary/secondary\n");
+ if (xscom_write(mchip, TOD_TTYPE_1, PPC_BIT(0))) {
+ prerror("XSCOM error switching primary/secondary\n");
return 0;
}
@@ -1249,7 +1247,7 @@ static int chiptod_start_tod(void)
* Check if new master TOD is running.
*/
if (!chiptod_master_running()) {
- prerror("CHIPTOD: TOD is not running on new master.\n");
+ prerror("TOD is not running on new master.\n");
return 0;
}
@@ -1259,8 +1257,8 @@ static int chiptod_start_tod(void)
* During topology switch, step checkers are disabled
* on all Chip TODs by default. Enable them.
*/
- if (xscom_writeme(TOD_TTYPE_2, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error enabling steppers\n");
+ if (xscom_writeme(TOD_TTYPE_2, PPC_BIT(0))) {
+ prerror("XSCOM error enabling steppers\n");
return 0;
}
@@ -1284,8 +1282,8 @@ static int chiptod_start_tod(void)
}
/* Switch local chiptod to "Not Set" state */
- if (xscom_writeme(TOD_LOAD_TOD_MOD, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error sending LOAD_TOD_MOD\n");
+ if (xscom_writeme(TOD_LOAD_TOD_MOD, PPC_BIT(0))) {
+ prerror("XSCOM error sending LOAD_TOD_MOD\n");
return 0;
}
@@ -1293,8 +1291,8 @@ static int chiptod_start_tod(void)
* Request the current TOD value from another chip.
* This will move TOD in running state
*/
- if (xscom_writeme(TOD_TTYPE_3, (1UL << 63)) != 0) {
- prerror("CHIPTOD: XSCOM error sending TTYPE_3\n");
+ if (xscom_writeme(TOD_TTYPE_3, PPC_BIT(0))) {
+ prerror("XSCOM error sending TTYPE_3\n");
return 0;
}
@@ -1343,12 +1341,12 @@ static bool tfmr_recover_tb_errors(uint64_t tfmr)
do {
if (++timeout >= TIMEOUT_LOOPS) {
- prerror("CHIPTOD: TB error reset timeout !\n");
+ prerror("TB error reset timeout !\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: TB error reset: corrupt TFMR !\n");
+ prerror("TB error reset: corrupt TFMR !\n");
return false;
}
} while (tfmr & SPR_TFMR_CLEAR_TB_ERRORS);
@@ -1405,9 +1403,8 @@ static bool tfmr_recover_non_tb_errors(uint64_t tfmr)
/* Check if TFMR non-TB errors still present. */
if (tfmr & tfmr_reset_errors) {
- prerror(
- "CHIPTOD: TFMR non-TB error recovery failed! TFMR=0x%016lx\n",
- mfspr(SPR_TFMR));
+ prerror("TFMR non-TB error recovery failed! "
+ "TFMR=0x%016lx\n", mfspr(SPR_TFMR));
return false;
}
return true;
@@ -1437,7 +1434,7 @@ static bool chiptod_recover_tfmr_error(void)
/* Check if TFMR parity error still present. */
if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
- prerror("CHIPTOD: TFMR error recovery: corrupt TFMR !\n");
+ prerror("TFMR error recovery: corrupt TFMR !\n");
return false;
}
@@ -1576,8 +1573,8 @@ opal_call(OPAL_RESYNC_TIMEBASE, opal_resync_timebase, 0);
static void chiptod_print_tb(void *data __unused)
{
- prlog(PR_DEBUG, "CHIPTOD: PIR 0x%04x TB=%lx\n",
- this_cpu()->pir, mfspr(SPR_TBRL));
+ prlog(PR_DEBUG, "PIR 0x%04x TB=%lx\n", this_cpu()->pir,
+ mfspr(SPR_TBRL));
}
static bool chiptod_probe(void)
@@ -1606,7 +1603,7 @@ static bool chiptod_probe(void)
}
if (chiptod_type == chiptod_unknown) {
- prerror("CHIPTOD: Unknown TOD type !\n");
+ prerror("Unknown TOD type !\n");
return false;
}
@@ -1625,7 +1622,7 @@ static void chiptod_discover_new_backup(enum chiptod_topology topo)
/* Found new backup master chip. Update the topology info */
if (chip) {
- prlog(PR_DEBUG, "CHIPTOD: New backup master: CHIP [%d]\n",
+ prlog(PR_DEBUG, "New backup master: CHIP [%d]\n",
chip->id);
if (topo == chiptod_topo_primary)
@@ -1636,7 +1633,7 @@ static void chiptod_discover_new_backup(enum chiptod_topology topo)
chiptod_update_topology(topo);
prlog(PR_DEBUG,
- "CHIPTOD: Backup topology configuration changed.\n");
+ "Backup topology configuration changed.\n");
print_topology_info();
}
@@ -1713,7 +1710,7 @@ void chiptod_init(void)
op_display(OP_LOG, OP_MOD_CHIPTOD, 0);
if (!chiptod_probe()) {
- prerror("CHIPTOD: Failed ChipTOD detection !\n");
+ prerror("Failed ChipTOD detection !\n");
op_display(OP_FATAL, OP_MOD_CHIPTOD, 0);
abort();
}
@@ -1726,7 +1723,7 @@ void chiptod_init(void)
/* Calculate the base TFMR value used for everybody */
chiptod_setup_base_tfmr();
- prlog(PR_DEBUG, "CHIPTOD: Base TFMR=0x%016llx\n", base_tfmr);
+ prlog(PR_DEBUG, "Base TFMR=0x%016llx\n", base_tfmr);
/* Schedule master sync */
sres = false;
@@ -1852,7 +1849,7 @@ static bool chiptod_wait_for_chip_sync(void)
/* Read core TFMR until the TB sync occurred */
do {
if (++timeout >= TIMEOUT_LOOPS) {
- prerror("CHIPTOD: No sync pulses\n");
+ prerror("No sync pulses\n");
return false;
}
tfmr = mfspr(SPR_TFMR);
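Several hunks above replace (1UL << 63) with PPC_BIT(0): skiboot counts bits IBM-style, with bit 0 as the most significant bit of a 64-bit word. A minimal sketch of the equivalence (the real macro lives in include/bitutils.h):

/* IBM (big-endian) bit numbering: bit 0 is the MSB of the doubleword. */
#define PPC_BIT(bit)	(0x8000000000000000UL >> (bit))

/* so the replacements are value-for-value identical:
 *   PPC_BIT(0)  == 1UL << 63
 *   PPC_BIT(63) == 1UL
 */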
diff --git a/hw/fake-rtc.c b/hw/fake-rtc.c
index 1b7c473..eca3f1b 100644
--- a/hw/fake-rtc.c
+++ b/hw/fake-rtc.c
@@ -17,25 +17,53 @@
#include <skiboot.h>
#include <opal.h>
#include <mem_region.h>
+#include <device.h>
+#include <timebase.h>
+#include <time-utils.h>
+#include <lock.h>
-static uint32_t *fake_ymd;
-static uint64_t *fake_hmsm;
+/* timebase when tm_offset was assigned */
+static unsigned long tb_synctime;
+
+/*
+ * Absolute time that was last assigned.
+ * Current rtc value is calculated from this.
+ */
+static struct tm tm_offset;
+
+/* protects tm_offset & tb_synctime */
+static struct lock emulation_lock;
static int64_t fake_rtc_write(uint32_t ymd, uint64_t hmsm)
{
- *fake_ymd = ymd;
- *fake_hmsm = hmsm;
+
+ lock(&emulation_lock);
+
+ datetime_to_tm(ymd, hmsm, &tm_offset);
+ tb_synctime = mftb();
+
+ unlock(&emulation_lock);
return OPAL_SUCCESS;
}
static int64_t fake_rtc_read(uint32_t *ymd, uint64_t *hmsm)
{
+
+ time_t sec;
+ struct tm tm_calculated;
+
if (!ymd || !hmsm)
return OPAL_PARAMETER;
- *ymd = *fake_ymd;
- *hmsm = *fake_hmsm;
+ /* Compute the emulated clock value */
+ lock(&emulation_lock);
+
+ sec = tb_to_secs(mftb() - tb_synctime) + mktime(&tm_offset);
+ gmtime_r(&sec, &tm_calculated);
+ tm_to_datetime(&tm_calculated, ymd, hmsm);
+
+ unlock(&emulation_lock);
return OPAL_SUCCESS;
}
@@ -43,7 +71,9 @@ static int64_t fake_rtc_read(uint32_t *ymd, uint64_t *hmsm)
void fake_rtc_init(void)
{
struct mem_region *rtc_region = NULL;
- uint32_t *rtc = NULL;
+ uint32_t *rtc = NULL, *fake_ymd;
+ uint64_t *fake_hmsm;
+ struct dt_node *np;
/* Read initial values from reserved memory */
rtc_region = find_mem_region("ibm,fake-rtc");
@@ -54,14 +84,25 @@ void fake_rtc_init(void)
return;
}
+ init_lock(&emulation_lock);
+
+ /* Fetch the initial rtc values */
rtc = (uint32_t *) rtc_region->start;
fake_ymd = rtc;
fake_hmsm = ((uint64_t *) &rtc[1]);
- prlog(PR_TRACE, "Init fake RTC to 0x%x 0x%llx\n",
- *fake_ymd, *fake_hmsm);
+ fake_rtc_write(*fake_ymd, *fake_hmsm);
+ /* Register opal calls */
opal_register(OPAL_RTC_READ, fake_rtc_read, 2);
opal_register(OPAL_RTC_WRITE, fake_rtc_write, 2);
+
+ /* add the fake rtc dt node */
+ np = dt_new(opal_node, "rtc");
+ dt_add_property_strings(np, "compatible", "ibm,opal-rtc");
+
+ prlog(PR_TRACE, "Init fake RTC to Date:%d-%d-%d Time:%d-%d-%d\n",
+ tm_offset.tm_mon, tm_offset.tm_mday, tm_offset.tm_year,
+ tm_offset.tm_hour, tm_offset.tm_min, tm_offset.tm_sec);
}
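The emulated clock is just the last calendar time written plus the timebase delta converted to seconds. A worked example, assuming a 512 MHz timebase (the usual POWER8 value):

/* Illustrative only: after an OPAL_RTC_WRITE stores tm_offset and
 * tb_synctime, a read 1,024,000,000 ticks later computes
 *   sec = tb_to_secs(1024000000) + mktime(&tm_offset)
 *       = 2                      + mktime(&tm_offset)
 * i.e. the stored date/time advanced by two seconds.
 */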
diff --git a/hw/fsp/fsp-attn.c b/hw/fsp/fsp-attn.c
index 8a2ec92..7b56192 100644
--- a/hw/fsp/fsp-attn.c
+++ b/hw/fsp/fsp-attn.c
@@ -102,7 +102,7 @@ static void update_sp_attn_area(const char *msg)
init_sp_attn_area();
ti_attn->src_word[0] =
- (uint32_t)((uint64_t)__builtin_return_address(0) & 0xffffffff);
+ cpu_to_be32((uint32_t)((uint64_t)__builtin_return_address(0) & 0xffffffff));
snprintf(ti_attn->msg.version, VERSION_LEN, "%s", version);
ent_cnt = STACK_BUF_ENTRIES;
diff --git a/hw/fsp/fsp-chiptod.c b/hw/fsp/fsp-chiptod.c
index 148bfaa..567f5df 100644
--- a/hw/fsp/fsp-chiptod.c
+++ b/hw/fsp/fsp-chiptod.c
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#define pr_fmt(fmt) "CHIPTOD: " fmt
+
#include <skiboot.h>
#include <chiptod.h>
#include <fsp.h>
@@ -37,8 +39,8 @@ static bool fsp_chiptod_update_topology(uint32_t cmd_sub_mod,
*/
action = !!msg->data.bytes[2];
topo = msg->data.bytes[3];
- prlog(PR_DEBUG, "CHIPTOD: Topology update event\n");
- prlog(PR_DEBUG, "CHIPTOD: Action = %s, Topology = %s\n",
+ prlog(PR_DEBUG, "Topology update event:\n");
+ prlog(PR_DEBUG, " Action = %s, Topology = %s\n",
action ? "Enable" : "Disable",
topo ? "Secondary" : "Primary");
@@ -49,17 +51,16 @@ static bool fsp_chiptod_update_topology(uint32_t cmd_sub_mod,
resp = fsp_mkmsg(FSP_RSP_TOPO_ENABLE_DISABLE | status, 0);
if (!resp) {
- prerror("CHIPTOD: Response allocation failed\n");
+ prerror("Response allocation failed\n");
return false;
}
if (fsp_queue_msg(resp, fsp_freemsg)) {
fsp_freemsg(resp);
- prerror("CHIPTOD: Failed to queue response msg\n");
+ prerror("Failed to queue response msg\n");
}
return true;
default:
- prlog(PR_DEBUG,
- "CHIPTOD: Unhandled sub cmd: %06x\n", cmd_sub_mod);
+ prlog(PR_DEBUG, "Unhandled sub cmd: %06x\n", cmd_sub_mod);
break;
}
return false;
diff --git a/hw/fsp/fsp-dpo.c b/hw/fsp/fsp-dpo.c
index 8e88d8f..f6fadc5 100644
--- a/hw/fsp/fsp-dpo.c
+++ b/hw/fsp/fsp-dpo.c
@@ -13,14 +13,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-/*
- * Handle FSP DPO (Delayed Power Off) event notification
- */
-#define pr_fmt(fmt) "FSPDPO: " fmt
+
+/* FSP DPO (Delayed Power Off) event support */
+
+#define pr_fmt(fmt) "FSP-DPO: " fmt
+
#include <skiboot.h>
-#include <console.h>
#include <fsp.h>
-#include <device.h>
#include <stdio.h>
#include <timebase.h>
#include <opal.h>
@@ -30,8 +29,8 @@
#define DPO_CMD_SGN_BYTE1 0x20 /* Byte[1] signature */
#define DPO_TIMEOUT 2700 /* 45 minutes in seconds */
-bool fsp_dpo_pending = false;
-static unsigned long fsp_dpo_init_tb = 0;
+bool fsp_dpo_pending;
+static unsigned long fsp_dpo_init_tb;
/*
* OPAL DPO interface
@@ -42,13 +41,13 @@ static unsigned long fsp_dpo_init_tb = 0;
*/
static int64_t fsp_opal_get_dpo_status(int64_t *dpo_timeout)
{
- if (fsp_dpo_init_tb && fsp_dpo_pending) {
- *dpo_timeout = DPO_TIMEOUT - tb_to_secs(mftb() - fsp_dpo_init_tb);
- return OPAL_SUCCESS;
- } else {
+ if (!fsp_dpo_pending) {
*dpo_timeout = 0;
return OPAL_WRONG_STATE;
}
+
+ *dpo_timeout = DPO_TIMEOUT - tb_to_secs(mftb() - fsp_dpo_init_tb);
+ return OPAL_SUCCESS;
}
/* Process FSP DPO init message */
@@ -61,7 +60,7 @@ static void fsp_process_dpo(struct fsp_msg *msg)
/* DPO message does not have the correct signatures */
if ((msg->data.bytes[0] != DPO_CMD_SGN_BYTE0)
|| (msg->data.bytes[1] != DPO_CMD_SGN_BYTE1)) {
- prlog(PR_ERR, "Message signatures did not match\n");
+ prerror("Message signatures did not match\n");
cmd |= FSP_STATUS_INVALID_CMD;
resp = fsp_mkmsg(cmd, 0);
if (resp == NULL) {
@@ -76,14 +75,13 @@ static void fsp_process_dpo(struct fsp_msg *msg)
return;
}
- /* Sapphire is already in "DPO pending" state */
+ /* OPAL is already in "DPO pending" state */
if (fsp_dpo_pending) {
- prlog(PR_ERR, "OPAL is already in DPO pending state\n");
+ prlog(PR_INFO, "OPAL already in DPO pending state\n");
cmd |= FSP_STATUS_INVALID_DPOSTATE;
resp = fsp_mkmsg(cmd, 0);
if (resp == NULL) {
- prerror("%s : Message allocation failed\n",
- __func__);
+ prerror("%s : Message allocation failed\n", __func__);
return;
}
if (fsp_queue_msg(resp, fsp_freemsg)) {
@@ -94,18 +92,15 @@ static void fsp_process_dpo(struct fsp_msg *msg)
return;
}
- /* Record the DPO init time */
- fsp_dpo_init_tb = mftb();
/* Inform the host about DPO */
rc = opal_queue_msg(OPAL_MSG_DPO, NULL, NULL);
if (rc) {
- prlog(PR_ERR, "OPAL message queuing failed\n");
+ prerror("OPAL message queuing failed\n");
cmd |= FSP_STATUS_GENERIC_ERROR;
resp = fsp_mkmsg(cmd, 0);
if (resp == NULL) {
- prerror("%s : Message allocation failed\n",
- __func__);
+ prerror("%s : Message allocation failed\n", __func__);
return;
}
if (fsp_queue_msg(resp, fsp_freemsg)) {
@@ -114,7 +109,8 @@ static void fsp_process_dpo(struct fsp_msg *msg)
"message\n", __func__);
}
return;
- }
+ } else
+ prlog(PR_INFO, "Notified host about DPO event\n");
/* Acknowledge the FSP on DPO */
resp = fsp_mkmsg(cmd, 0);
@@ -124,21 +120,23 @@ static void fsp_process_dpo(struct fsp_msg *msg)
}
if (fsp_queue_msg(resp, fsp_freemsg)) {
fsp_freemsg(resp);
- prerror("%s : Failed to queue response message\n",
- __func__);
+ prerror("%s : Failed to queue response message\n", __func__);
+ return;
}
+ /* Record DPO init time and set DPO pending flag */
+ fsp_dpo_init_tb = mftb();
fsp_dpo_pending = true;
/*
- * Sapphire is now in DPO pending state. After first detecting DPO
- * condition from Sapphire, the host will have 45 minutes to prepare
+ * OPAL is now in DPO pending state. After first detecting DPO
+ * condition from OPAL, the host will have 45 minutes to prepare
* the system for shutdown. The host must take all necessary actions
* required in that regard and at the end shutdown itself. The host
* shutdown sequence eventually will make the call OPAL_CEC_POWER_DOWN
* which in turn ask the FSP to shutdown the CEC. If the FSP does not
- * receive the cec power down command from Sapphire within 45 minutes,
- * it will assume that the host and the Sapphire has processed the DPO
+ * receive the cec power down command from OPAL within 45 minutes,
+ * it will assume that the host and OPAL have processed the DPO
* sequence successfully and hence force power off the system.
*/
}
@@ -147,10 +145,12 @@ static void fsp_process_dpo(struct fsp_msg *msg)
static bool fsp_dpo_message(u32 cmd_sub_mod, struct fsp_msg *msg)
{
if (cmd_sub_mod == FSP_CMD_INIT_DPO) {
- prlog(PR_TRACE, "SP initiated Delayed Power Off (DPO)\n");
+ prlog(PR_INFO, "Delayed Power Off (DPO) notification received\n");
fsp_process_dpo(msg);
return true;
}
+
+ prerror("Unknown command 0x%x\n", cmd_sub_mod);
return false;
}
@@ -162,5 +162,5 @@ void fsp_dpo_init(void)
{
fsp_register_client(&fsp_dpo_client, FSP_MCLASS_SERVICE);
opal_register(OPAL_GET_DPO_STATUS, fsp_opal_get_dpo_status, 1);
- prlog(PR_TRACE, "FSP DPO support initialized\n");
+ prlog(PR_INFO, "FSP DPO support initialized\n");
}
diff --git a/hw/fsp/fsp-dump.c b/hw/fsp/fsp-dump.c
index ae5bfee..b1dd090 100644
--- a/hw/fsp/fsp-dump.c
+++ b/hw/fsp/fsp-dump.c
@@ -419,7 +419,7 @@ static int64_t validate_dump_sglist(struct opal_sg_list *list,
for (i = 0; i < num_entries; i++) {
entry = &sg->entry[i];
- *size += entry->length;
+ *size += be64_to_cpu(entry->length);
/* All entries must be aligned */
if (((uint64_t)be64_to_cpu(entry->data)) & 0xfff)
diff --git a/hw/fsp/fsp-leds.c b/hw/fsp/fsp-leds.c
index 4933dfa..9ba588b 100644
--- a/hw/fsp/fsp-leds.c
+++ b/hw/fsp/fsp-leds.c
@@ -814,7 +814,6 @@ static void fsp_ret_loc_code_list(u16 req_type, char *loc_code)
}
/* Push the data into TCE buffer */
- bytes_sent = 0;
bytes_sent = fsp_push_data_to_tce(led, out_data, total_size);
/* Advance the TCE pointer */
@@ -827,7 +826,6 @@ static void fsp_ret_loc_code_list(u16 req_type, char *loc_code)
list_for_each_safe(&encl_ledq, led, next, link) {
/* Push the data into TCE buffer */
- bytes_sent = 0;
bytes_sent = fsp_push_data_to_tce(led,
out_data, total_size);
diff --git a/hw/fsp/fsp-mdst-table.c b/hw/fsp/fsp-mdst-table.c
index 9c71cda..e6018aa 100644
--- a/hw/fsp/fsp-mdst-table.c
+++ b/hw/fsp/fsp-mdst-table.c
@@ -103,9 +103,9 @@ static int dump_region_tce_map(void)
for (i = 0; i < cur_mdst_entry; i++) {
- addr = dump_mem_region[i].addr & ~TCE_MASK;
- size = get_dump_region_map_size(dump_mem_region[i].addr,
- dump_mem_region[i].size);
+ addr = be64_to_cpu(dump_mem_region[i].addr) & ~TCE_MASK;
+ size = get_dump_region_map_size(be64_to_cpu(dump_mem_region[i].addr),
+ be32_to_cpu(dump_mem_region[i].size));
if (t_size + size > max_dump_size)
break;
@@ -116,10 +116,11 @@ static int dump_region_tce_map(void)
/* Add entry to MDST table */
mdst_table[i].type = dump_mem_region[i].type;
mdst_table[i].size = dump_mem_region[i].size;
- mdst_table[i].addr = PSI_DMA_HYP_DUMP + t_size;
+ mdst_table[i].addr = cpu_to_be64(PSI_DMA_HYP_DUMP + t_size);
/* TCE alignment adjustment */
- mdst_table[i].addr += dump_mem_region[i].addr & 0xfff;
+ mdst_table[i].addr = cpu_to_be64(be64_to_cpu(mdst_table[i].addr) +
+ (be64_to_cpu(dump_mem_region[i].addr) & 0xfff));
t_size += size;
}
@@ -193,7 +194,7 @@ static int dump_region_del_entry(uint32_t id)
lock(&mdst_lock);
for (i = 0; i < cur_mdst_entry; i++) {
- if (dump_mem_region[i].type != id)
+ if (be32_to_cpu(dump_mem_region[i].type) != id)
continue;
found = true;
@@ -206,8 +207,8 @@ static int dump_region_del_entry(uint32_t id)
}
/* Adjust current dump size */
- size = get_dump_region_map_size(dump_mem_region[i].addr,
- dump_mem_region[i].size);
+ size = get_dump_region_map_size(be64_to_cpu(dump_mem_region[i].addr),
+ be32_to_cpu(dump_mem_region[i].size));
cur_dump_size -= size;
for ( ; i < cur_mdst_entry - 1; i++)
@@ -250,9 +251,9 @@ static int __dump_region_add_entry(uint32_t id, uint64_t addr, uint32_t size)
}
/* Add entry to dump memory region table */
- dump_mem_region[cur_mdst_entry].type = id;
- dump_mem_region[cur_mdst_entry].addr = addr;
- dump_mem_region[cur_mdst_entry].size = size;
+ dump_mem_region[cur_mdst_entry].type = cpu_to_be32(id);
+ dump_mem_region[cur_mdst_entry].addr = cpu_to_be64(addr);
+ dump_mem_region[cur_mdst_entry].size = cpu_to_be32(size);
/* Update dump region count and dump size */
cur_mdst_entry++;
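The pattern in this hunk is used for every field shared with the FSP: the structure members stay big-endian in memory, so any arithmetic on them is an explicit swap-modify-swap. A minimal sketch of the idiom, assuming skiboot's __be64 type and byte-swap helpers (the helper function itself is illustrative):

/* add a byte offset to a big-endian 64-bit field in place */
static inline void be64_add(__be64 *field, uint64_t off)
{
	*field = cpu_to_be64(be64_to_cpu(*field) + off);
}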
diff --git a/hw/fsp/fsp-mem-err.c b/hw/fsp/fsp-mem-err.c
index 526afaf..a2b0619 100644
--- a/hw/fsp/fsp-mem-err.c
+++ b/hw/fsp/fsp-mem-err.c
@@ -159,7 +159,7 @@ static bool is_resilience_event_exist(u64 paddr)
list_for_each(&mem_error_list, entry, list) {
merr_evt = &entry->data;
if ((merr_evt->type == OPAL_MEM_ERR_TYPE_RESILIENCE) &&
- (merr_evt->u.resilience.physical_address_start
+ (be64_to_cpu(merr_evt->u.resilience.physical_address_start)
== paddr)) {
found = 1;
break;
@@ -212,7 +212,7 @@ static bool handle_memory_resilience(u32 cmd_sub_mod, u64 paddr)
* For now, send corrected errors to linux and let
* linux handle corrected errors thresholding.
*/
- mem_err_evt.flags |= OPAL_MEM_CORRECTED_ERROR;
+ mem_err_evt.flags |= cpu_to_be16(OPAL_MEM_CORRECTED_ERROR);
mem_err_evt.u.resilience.resil_err_type =
OPAL_MEM_RESILIENCE_CE;
break;
@@ -225,9 +225,9 @@ static bool handle_memory_resilience(u32 cmd_sub_mod, u64 paddr)
OPAL_MEM_RESILIENCE_UE_SCRUB;
break;
}
- mem_err_evt.u.resilience.physical_address_start = paddr;
+ mem_err_evt.u.resilience.physical_address_start = cpu_to_be64(paddr);
mem_err_evt.u.resilience.physical_address_end =
- paddr + MEM_ERR_PAGE_SIZE_4K;
+ cpu_to_be64(paddr + MEM_ERR_PAGE_SIZE_4K);
/* Queue up the event and inform OS about it. */
rc = queue_mem_err_node(&mem_err_evt);
@@ -260,13 +260,13 @@ static bool update_memory_deallocation_event(u64 paddr_start, u64 paddr_end)
list_for_each(&mem_error_list, entry, list) {
merr_evt = &entry->data;
if ((merr_evt->type == OPAL_MEM_ERR_TYPE_DYN_DALLOC) &&
- (merr_evt->u.dyn_dealloc.physical_address_start
+ (be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start)
== paddr_start)) {
found = 1;
- if (merr_evt->u.dyn_dealloc.physical_address_end
+ if (be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end)
< paddr_end)
- merr_evt->u.dyn_dealloc.physical_address_end
- = paddr_end;
+ merr_evt->u.dyn_dealloc.physical_address_end =
+ cpu_to_be64(paddr_end);
break;
}
}
@@ -315,8 +315,8 @@ static bool handle_memory_deallocation(u64 paddr_start, u64 paddr_end)
mem_err_evt.type = OPAL_MEM_ERR_TYPE_DYN_DALLOC;
mem_err_evt.u.dyn_dealloc.dyn_err_type =
OPAL_MEM_DYNAMIC_DEALLOC;
- mem_err_evt.u.dyn_dealloc.physical_address_start = paddr_start;
- mem_err_evt.u.dyn_dealloc.physical_address_end = paddr_end;
+ mem_err_evt.u.dyn_dealloc.physical_address_start = cpu_to_be64(paddr_start);
+ mem_err_evt.u.dyn_dealloc.physical_address_end = cpu_to_be64(paddr_end);
/* Queue up the event and inform OS about it. */
rc = queue_mem_err_node(&mem_err_evt);
@@ -353,13 +353,13 @@ static bool fsp_mem_err_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
* correctable/Uncorrectable/scrub UE errors with real
* address of 4K memory page in which the error occurred.
*/
- paddr_start = *((u64 *)&msg->data.words[0]);
+ paddr_start = be64_to_cpu(*((__be64 *)&msg->data.words[0]));
printf("Got memory resilience error message for "
"paddr=0x%016llux\n", paddr_start);
return handle_memory_resilience(cmd_sub_mod, paddr_start);
case FSP_CMD_MEM_DYN_DEALLOC:
- paddr_start = *((u64 *)&msg->data.words[0]);
- paddr_end = *((u64 *)&msg->data.words[2]);
+ paddr_start = be64_to_cpu(*((__be64 *)&msg->data.words[0]));
+ paddr_end = be64_to_cpu(*((__be64 *)&msg->data.words[2]));
printf("Got dynamic memory deallocation message: "
"paddr_start=0x%016llux, paddr_end=0x%016llux\n",
paddr_start, paddr_end);
diff --git a/hw/fsp/fsp-sensor.c b/hw/fsp/fsp-sensor.c
index d83f1a1..a2605d6 100644
--- a/hw/fsp/fsp-sensor.c
+++ b/hw/fsp/fsp-sensor.c
@@ -409,7 +409,6 @@ static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr)
{
int rc;
struct fsp_msg *msg;
- uint32_t *sensor_buf_ptr;
uint32_t align;
uint32_t cmd_header;
@@ -418,12 +417,9 @@ static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr)
if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
/* TODO Support this modifier '0x14', if required */
- align = attr->offset % sizeof(*sensor_buf_ptr);
+ align = attr->offset % sizeof(uint32_t);
if (align)
- attr->offset += (sizeof(*sensor_buf_ptr) - align);
-
- sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer +
- attr->offset);
+ attr->offset += (sizeof(uint32_t) - align);
/* TODO Add 8 byte command data required for mod 0x14 */
@@ -712,7 +708,6 @@ void fsp_init_sensor(void)
{
uint32_t cmd_header, align, size, psi_dma_offset = 0;
enum spcn_rsp_status status;
- uint32_t *sensor_buf_ptr;
struct fsp_msg msg, resp;
int index, rc;
@@ -741,12 +736,9 @@ void fsp_init_sensor(void)
spcn_mod_data[index].mod);
if (spcn_mod_data[index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
/* TODO Support this modifier 0x14, if required */
- align = psi_dma_offset % sizeof(*sensor_buf_ptr);
+ align = psi_dma_offset % sizeof(uint32_t);
if (align)
- psi_dma_offset += (sizeof(*sensor_buf_ptr) - align);
-
- sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer
- + psi_dma_offset);
+ psi_dma_offset += (sizeof(uint32_t) - align);
/* TODO Add 8 byte command data required for mod 0x14 */
psi_dma_offset += 8;
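Both hunks above round the buffer offset up to a 4-byte boundary by adding the remainder's complement; the usual align-up expression gives the same result. A worked example:

/* Illustrative equivalence:
 *   align = offset % sizeof(uint32_t);
 *   if (align)
 *           offset += sizeof(uint32_t) - align;
 * is the same as
 *   offset = (offset + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);
 * e.g. 0x11 -> 0x14 and 0x14 stays 0x14.
 */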
diff --git a/hw/ipmi/ipmi-rtc.c b/hw/ipmi/ipmi-rtc.c
index 01fb3e1..ba6f977 100644
--- a/hw/ipmi/ipmi-rtc.c
+++ b/hw/ipmi/ipmi-rtc.c
@@ -34,7 +34,7 @@ static void get_sel_time_error(struct ipmi_msg *msg)
static void get_sel_time_complete(struct ipmi_msg *msg)
{
struct tm tm;
- uint32_t result;
+ le32 result;
time_t time;
memcpy(&result, msg->data, 4);
@@ -59,12 +59,12 @@ static int64_t ipmi_get_sel_time(void)
return ipmi_queue_msg(msg);
}
-static int64_t ipmi_set_sel_time(uint32_t tv)
+static int64_t ipmi_set_sel_time(uint32_t _tv)
{
struct ipmi_msg *msg;
+ const le32 tv = cpu_to_le32(_tv);
- tv = cpu_to_le32(tv);
- msg = ipmi_mkmsg_simple(IPMI_SET_SEL_TIME, &tv, sizeof(tv));
+ msg = ipmi_mkmsg_simple(IPMI_SET_SEL_TIME, (void*)&tv, sizeof(tv));
if (!msg)
return OPAL_HARDWARE;
diff --git a/hw/ipmi/ipmi-sel.c b/hw/ipmi/ipmi-sel.c
index a179513..4610829 100644
--- a/hw/ipmi/ipmi-sel.c
+++ b/hw/ipmi/ipmi-sel.c
@@ -117,8 +117,8 @@ struct oem_sel {
/* SEL header */
uint8_t id[2];
uint8_t type;
- uint8_t manuf_id[3];
uint8_t timestamp[4];
+ uint8_t manuf_id[3];
/* OEM SEL data (6 bytes) follows */
uint8_t netfun;
uint8_t cmd;
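The swap puts the fields back in the on-wire order of an OEM timestamped SEL record: record ID, record type, timestamp, then the 3-byte manufacturer ID. Illustrative byte offsets after the change:

/* struct oem_sel layout after the reorder (offsets in bytes):
 *   0-1  id[2]
 *   2    type
 *   3-6  timestamp[4]
 *   7-9  manuf_id[3]
 *   10+  the 6 bytes of OEM data (netfun, cmd, ...)
 */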
diff --git a/hw/ipmi/ipmi-sensor.c b/hw/ipmi/ipmi-sensor.c
index c8723ae..dd63986 100644
--- a/hw/ipmi/ipmi-sensor.c
+++ b/hw/ipmi/ipmi-sensor.c
@@ -25,7 +25,8 @@
#define FW_PROGRESS_SENSOR_TYPE 0x0F
#define BOOT_COUNT_SENSOR_TYPE 0xC3
-static int16_t sensors[255];
+#define MAX_IPMI_SENSORS 255
+static int16_t sensors[MAX_IPMI_SENSORS];
struct set_sensor_req {
u8 sensor_number;
@@ -38,6 +39,7 @@ struct set_sensor_req {
uint8_t ipmi_get_sensor_number(uint8_t sensor_type)
{
+ assert(sensor_type < MAX_IPMI_SENSORS);
return sensors[sensor_type];
}
@@ -125,6 +127,7 @@ void ipmi_sensor_init(void)
}
num = (uint8_t)dt_property_get_cell(num_prop, 0);
type = (uint8_t)dt_property_get_cell(type_prop, 0);
+ assert(type < MAX_IPMI_SENSORS);
sensors[type] = num;
}
}
diff --git a/hw/lpc-rtc.c b/hw/lpc-rtc.c
index 63124df..95506cd 100644
--- a/hw/lpc-rtc.c
+++ b/hw/lpc-rtc.c
@@ -119,6 +119,8 @@ static void lpc_init_time(void)
struct tm tm;
bool valid;
+ memset(&tm, 0, sizeof(tm));
+
lock(&rtc_lock);
/* If update is in progress, wait a bit */
diff --git a/hw/lpc-uart.c b/hw/lpc-uart.c
index 2a95da5..bba6354 100644
--- a/hw/lpc-uart.c
+++ b/hw/lpc-uart.c
@@ -74,7 +74,7 @@ static void uart_trace(u8 ctx, u8 cnt, u8 irq_state, u8 in_count)
t.uart.ctx = ctx;
t.uart.cnt = cnt;
t.uart.irq_state = irq_state;
- t.uart.in_count = in_count;
+ t.uart.in_count = cpu_to_be16(in_count);
trace_add(&t, TRACE_UART, sizeof(struct trace_uart));
}
diff --git a/hw/lpc.c b/hw/lpc.c
index 0e88e9f..60fefdb 100644
--- a/hw/lpc.c
+++ b/hw/lpc.c
@@ -531,9 +531,16 @@ static void lpc_setup_serirq(struct proc_chip *chip)
{
u32 val;
rc = opb_read(chip, lpc_reg_opb_base + LPC_HC_IRQMASK, &val, 4);
- DBG_IRQ("LPC: MASK READBACK=%x\n", val);
+ if (rc)
+ prerror("LPC: failed to readback mask");
+ else
+ DBG_IRQ("LPC: MASK READBACK=%x\n", val);
+
rc = opb_read(chip, lpc_reg_opb_base + LPC_HC_IRQSER_CTRL, &val, 4);
- DBG_IRQ("LPC: CTRL READBACK=%x\n", val);
+ if (rc)
+ prerror("LPC: failed to readback ctrl");
+ else
+ DBG_IRQ("LPC: CTRL READBACK=%x\n", val);
}
}
diff --git a/hw/npu-hw-procedures.c b/hw/npu-hw-procedures.c
new file mode 100644
index 0000000..ba87d43
--- /dev/null
+++ b/hw/npu-hw-procedures.c
@@ -0,0 +1,602 @@
+/* Copyright 2013-2015 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <io.h>
+#include <timebase.h>
+#include <pci.h>
+#include <interrupts.h>
+#include <lock.h>
+#include <npu-regs.h>
+#include <npu.h>
+#include <xscom.h>
+
+typedef uint32_t (*step)(struct npu_dev *);
+
+struct procedure {
+ const char *name;
+ step steps[];
+};
+
+#define DEFINE_PROCEDURE(NAME, STEPS...) \
+ struct procedure procedure_##NAME = \
+ {.name = #NAME, .steps = {NAME, ##STEPS}}
+
+#define PROCEDURE_INPROGRESS (1 << 31)
+#define PROCEDURE_COMPLETE (1 << 30)
+#define PROCEDURE_NEXT (1 << 29)
+#define PROCEDURE_FAILED 2
+#define PROCEDURE_ABORTED 3
+#define PROCEDURE_UNSUPPORTED 4
+
+/* Mask defining which status bits we want to expose */
+#define PROCEDURE_STATUS_MASK 0xc000000f
+
+/* Accessors for PHY registers. Accesses can be done either via MMIO or SCOM. */
+static bool pl_use_scom = 1;
+static void phy_write(struct npu_dev *npu_dev, uint64_t addr, uint32_t val)
+{
+ if (pl_use_scom)
+ xscom_write(npu_dev->npu->chip_id, npu_dev->pl_xscom_base | addr, val);
+ else
+ out_be16((void *) npu_dev->pl_base + PL_MMIO_ADDR(addr), val);
+}
+
+static uint16_t phy_read(struct npu_dev *npu_dev, uint64_t addr)
+{
+ uint64_t val;
+
+ if (pl_use_scom)
+ xscom_read(npu_dev->npu->chip_id, npu_dev->pl_xscom_base + addr, &val);
+ else
+ val = in_be16((void *) npu_dev->pl_base + PL_MMIO_ADDR(addr));
+
+ return val & 0xffff;
+}
+
+/* The DL registers can be accessed indirectly via the NTL */
+static void dl_write(struct npu_dev *npu_dev, uint32_t addr, uint32_t val)
+{
+ xscom_write(npu_dev->npu->chip_id,
+ npu_dev->xscom + NX_DL_REG_ADDR, addr);
+ xscom_write(npu_dev->npu->chip_id,
+ npu_dev->xscom + NX_DL_REG_DATA, val);
+}
+
+static uint64_t __unused dl_read(struct npu_dev *npu_dev, uint32_t addr)
+{
+ uint64_t val;
+
+ xscom_write(npu_dev->npu->chip_id,
+ npu_dev->xscom + NX_DL_REG_ADDR, addr);
+ xscom_read(npu_dev->npu->chip_id,
+ npu_dev->xscom + NX_DL_REG_DATA, &val);
+ return val;
+}
+
+/* Our hardware bits are backwards here. The lane vectors are 16-bit
+ * values represented in IBM bit ordering. This means lane 0 is
+ * represented by bit 15 in most of the registers. Internally we keep
+ * this sane (ie. npu_dev->lane_mask[0] == lane 0) as we need sane
+ * numbering for set_lane_reg() anyway. */
+static uint32_t phy_lane_mask(struct npu_dev *npu_dev)
+{
+ /* We only train 8 lanes at a time so we don't do a full
+ * bit-swap */
+ assert(npu_dev->lane_mask == 0xff00 || npu_dev->lane_mask == 0xff);
+
+ return ~npu_dev->lane_mask & 0xffff;
+}
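/* Worked example (not part of the patch): for the lower eight lanes,
 * npu_dev->lane_mask == 0x00ff and phy_lane_mask() returns
 * ~0x00ff & 0xffff == 0xff00. In the register's IBM bit ordering,
 * bit 15 is lane 0 and bit 8 is lane 7, so 0xff00 selects exactly
 * lanes 0-7; the assert above is what keeps this half-mask shortcut
 * equivalent to a full bit reversal. */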
+
+static void set_lane_reg(struct npu_dev *npu_dev, uint64_t base_reg,
+ uint64_t data, uint64_t mask)
+{
+ uint64_t val, i;
+ uint32_t lane_mask = npu_dev->lane_mask;
+
+ for (i = 0; i <= 23; i++) {
+ if (lane_mask & (1ul << i)) {
+ uint64_t tx_rxcal_reg = base_reg + (i << 32);
+ val = phy_read(npu_dev, tx_rxcal_reg);
+ val = (val & ~mask) | data;
+ phy_write(npu_dev, tx_rxcal_reg, val);
+ }
+ }
+}
+
+static uint32_t stop(struct npu_dev *npu_dev __unused)
+{
+ return PROCEDURE_COMPLETE | PROCEDURE_ABORTED;
+}
+DEFINE_PROCEDURE(stop);
+
+static uint32_t nop(struct npu_dev *npu_dev __unused)
+{
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(nop);
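/* Illustrative sketch, not part of the patch: how a caller might walk
 * one of these DEFINE_PROCEDURE() tables. Only struct procedure and the
 * PROCEDURE_* codes come from this file; the driver and its step-index
 * bookkeeping are assumptions. */
static uint32_t run_procedure_step(struct procedure *proc,
				   struct npu_dev *dev,
				   unsigned int *step)
{
	uint32_t rc = proc->steps[*step](dev);

	if (rc & PROCEDURE_NEXT) {
		(*step)++;		/* advance, poll the new step later */
		return PROCEDURE_INPROGRESS;
	}
	/* PROCEDURE_INPROGRESS: poll the same step again later;
	 * PROCEDURE_COMPLETE (optionally with an error code): finished. */
	return rc;
}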
+
+/* Procedure 1.2.1 (RESET_NPU_DL) from opt_programmerguide.odt. Also
+ * incorporates AT reset. */
+static uint32_t reset_npu_dl(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+
+ /* Assert NPU reset */
+ xscom_read(npu_dev->npu->chip_id, npu_dev->xscom + NX_NTL_CONTROL, &val);
+ val |= NTL_CONTROL_RESET;
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_NTL_CONTROL, val);
+
+ /* Put the Nvidia logic in reset */
+ dl_write(npu_dev, NDL_CONTROL, 0xe8000000);
+
+ /* Release Nvidia logic from reset */
+ dl_write(npu_dev, NDL_CONTROL, 0);
+
+ /* Release NPU from reset */
+ val &= ~NTL_CONTROL_RESET;
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_NTL_CONTROL, val);
+
+ /* Set up TL credits */
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_TL_CMD_CR, PPC_BIT(0));
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_TL_CMD_D_CR, PPC_BIT(0));
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_TL_RSP_CR, PPC_BIT(15));
+ xscom_write(npu_dev->npu->chip_id, npu_dev->xscom + NX_TL_RSP_D_CR, PPC_BIT(15));
+
+ /* Reset error registers. TODO: are there more we should clear here? */
+ npu_ioda_sel(npu_dev->npu, NPU_IODA_TBL_PESTB, 0, true);
+ for (val = 0; val < NPU_NUM_OF_PES; val++)
+ out_be64(npu_dev->npu->at_regs + NPU_IODA_DATA0, 0);
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(reset_npu_dl);
+
+/* Procedures 1.2.3 (reset_lanes) & 1.2.4
+ * (io_register_write_reset_values) */
+static uint32_t phy_reset(struct npu_dev *npu_dev)
+{
+ uint16_t val;
+
+ /* Lower run_lane inputs for lanes to be reset */
+ val = phy_read(npu_dev, RX_RUN_LANE_VEC_0_15);
+ val &= ~phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RUN_LANE_VEC_0_15, val);
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_reset_wait(struct npu_dev *npu_dev)
+{
+ uint16_t val;
+
+ /* Wait for lane busy outputs to go to zero for lanes to be
+ * reset */
+ val = phy_read(npu_dev, RX_LANE_BUSY_VEC_0_15);
+ if (val & phy_lane_mask(npu_dev))
+ return PROCEDURE_INPROGRESS;
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_reset_complete(struct npu_dev *npu_dev)
+{
+ uint16_t val;
+ uint32_t lane_mask = phy_lane_mask(npu_dev);
+
+ /* Set ioreset_vec for the desired lanes bit positions */
+ val = phy_read(npu_dev, RX_IORESET_VEC_0_15);
+ phy_write(npu_dev, RX_IORESET_VEC_0_15, val | lane_mask);
+
+ val = phy_read(npu_dev, TX_IORESET_VEC_0_15);
+ phy_write(npu_dev, TX_IORESET_VEC_0_15, val | lane_mask);
+
+ /* Clear ioreset_vec */
+ val = phy_read(npu_dev, RX_IORESET_VEC_0_15);
+ phy_write(npu_dev, RX_IORESET_VEC_0_15, val & ~lane_mask);
+
+ val = phy_read(npu_dev, TX_IORESET_VEC_0_15);
+ phy_write(npu_dev, TX_IORESET_VEC_0_15, val & ~lane_mask);
+
+ /* Reset RX phase rotators */
+ set_lane_reg(npu_dev, RX_PR_CNTL_PL, RX_PR_RESET, RX_PR_RESET);
+ set_lane_reg(npu_dev, RX_PR_CNTL_PL, 0, RX_PR_RESET);
+
+ /* Restore registers from scominit that may have changed */
+ set_lane_reg(npu_dev, RX_PR_MODE, 0x8, RX_PR_PHASE_STEP);
+ set_lane_reg(npu_dev, RX_A_DAC_CNTL,
+ 0x7 << MASK_TO_LSH(RX_PR_IQ_RES_SEL),
+ RX_PR_IQ_RES_SEL);
+ set_lane_reg(npu_dev, TX_MODE1_PL, 0, TX_LANE_PDWN);
+ set_lane_reg(npu_dev, RX_BANK_CONTROLS, 0, RX_LANE_ANA_PDWN);
+ set_lane_reg(npu_dev, RX_MODE, 0, RX_LANE_DIG_PDWN);
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_reset, phy_reset_wait, phy_reset_complete);
+
+/* Round a fixed-point number. frac is the number of fractional
+ * bits. */
+static uint32_t round(uint32_t val, int frac)
+{
+ if (val >> (frac - 1) & 0x1)
+ return (val >> frac) + 1;
+ else
+ return val >> frac;
+}
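+
+/* e.g. round(27904, 9) = 55: the most significant discarded fraction
+ * bit (bit 8) is set, so the value rounds up from 54. */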
+
+#define ZCAL_MIN (10 << 3)
+#define ZCAL_MAX (40 << 3)
+#define ZCAL_K0 0x0
+#define ZCAL_M 128
+/* TODO: add a test case for the following values:
+
+ Initial values:
+ zcal_n = 0xda;
+ zcal_p = 0xc7;
+
+ Results:
+ pre_p = 0x0
+ pre_n = 0x0
+ margin_p = 0x0
+ margin_n = 0x0
+ total_en_p = 0x32
+ total_en_n = 0x37
+ */
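+/* With the constants above these follow directly: ZCAL_M = 0x80 makes
+ * both margins zero and ZCAL_K0 = 0 makes both pre values zero, so
+ * total_en_n = round(0x80 * 0xda, 9) = 0x37 and
+ * total_en_p = round(0x80 * 0xc7, 9) = 0x32. */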
+
+static uint32_t phy_tx_zcal(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+
+ if (npu_dev->index < 2 && npu_dev->npu->tx_zcal_complete[0])
+ return PROCEDURE_COMPLETE;
+
+ if (npu_dev->index >= 2 && npu_dev->npu->tx_zcal_complete[1])
+ return PROCEDURE_COMPLETE;
+
+ /* Start calibration */
+ val = phy_read(npu_dev, TX_IMPCAL_SWO1_PB);
+ val &= TX_ZCAL_SWO_EN;
+ phy_write(npu_dev, TX_IMPCAL_SWO1_PB, val);
+ phy_write(npu_dev, TX_IMPCAL_SWO2_PB, 0x50 << 2);
+ val = phy_read(npu_dev, TX_IMPCAL_PB);
+ val |= TX_ZCAL_REQ;
+ phy_write(npu_dev, TX_IMPCAL_PB, val);
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_tx_zcal_wait(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+
+ val = phy_read(npu_dev, TX_IMPCAL_PB);
+ if (!(val & TX_ZCAL_DONE))
+ return PROCEDURE_INPROGRESS;
+
+ if (val & TX_ZCAL_ERROR)
+ return PROCEDURE_COMPLETE | PROCEDURE_FAILED;
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_tx_zcal_calculate(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+ uint64_t zcal_n;
+ uint64_t zcal_p;
+ uint64_t margin_n;
+ uint64_t margin_p;
+ uint64_t pre_n;
+ uint64_t pre_p;
+ uint64_t total_en_n;
+ uint64_t total_en_p;
+
+ val = phy_read(npu_dev, TX_IMPCAL_NVAL_PB);
+ zcal_n = GETFIELD(TX_ZCAL_N, val);
+ val = phy_read(npu_dev, TX_IMPCAL_PVAL_PB);
+ zcal_p = GETFIELD(TX_ZCAL_P, val);
+
+ if ((zcal_n < ZCAL_MIN) || (zcal_n > ZCAL_MAX) ||
+ (zcal_p < ZCAL_MIN) || (zcal_p > ZCAL_MAX))
+ return PROCEDURE_COMPLETE | PROCEDURE_FAILED;
+
+ margin_n = (0x80 - ZCAL_M) * zcal_n / 2;
+ margin_p = (0x80 - ZCAL_M) * zcal_p / 2;
+ pre_n = (((0x80 * zcal_n) - (2 * margin_n)) * ZCAL_K0) / 0x80;
+ pre_p = (((0x80 * zcal_p) - (2 * margin_p)) * ZCAL_K0) / 0x80;
+
+ total_en_n = 0x80 * zcal_n - (2 * margin_n) - (pre_n & 1023);
+ total_en_p = 0x80 * zcal_p - (2 * margin_p) - (pre_p & 1023);
+
+ pre_p = round(pre_p, 9);
+ pre_n = round(pre_n, 9);
+ margin_p = round(margin_p, 9);
+ margin_n = round(margin_n, 9);
+ total_en_p = round(total_en_p, 9);
+ total_en_n = round(total_en_n, 9);
+
+ val = SETFIELD(TX_FFE_TOTAL_ENABLE_N_ENC, 0, total_en_n);
+ val = SETFIELD(TX_FFE_TOTAL_ENABLE_P_ENC, val, total_en_p);
+ phy_write(npu_dev, TX_FFE_TOTAL_2RSTEP_EN, val);
+
+ val = SETFIELD(TX_FFE_PRE_N_SEL_ENC, 0, pre_n);
+ val = SETFIELD(TX_FFE_PRE_P_SEL_ENC, val, pre_p);
+ phy_write(npu_dev, TX_FFE_PRE_2RSTEP_SEL, val);
+
+ val = SETFIELD(TX_FFE_MARGIN_PD_N_SEL_ENC, 0, margin_n);
+ val = SETFIELD(TX_FFE_MARGIN_PU_P_SEL_ENC, val, margin_p);
+ phy_write(npu_dev, TX_FFE_MARGIN_2RSTEP_SEL, val);
+
+ if (npu_dev->index < 2)
+ npu_dev->npu->tx_zcal_complete[0] = true;
+ else
+ npu_dev->npu->tx_zcal_complete[1] = true;
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_tx_zcal, phy_tx_zcal_wait, phy_tx_zcal_calculate);
+
+static uint32_t phy_enable_tx_rxcal(struct npu_dev *npu_dev)
+{
+ /* Turn common mode on */
+ set_lane_reg(npu_dev, TX_MODE2_PL, TX_RXCAL, TX_RXCAL);
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_enable_tx_rxcal);
+
+static uint32_t phy_disable_tx_rxcal(struct npu_dev *npu_dev)
+{
+ /* Turn common mode off */
+ set_lane_reg(npu_dev, TX_MODE2_PL, 0, TX_RXCAL);
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_disable_tx_rxcal);
+
+static uint32_t phy_rx_dccal(struct npu_dev *npu_dev)
+{
+ if (phy_read(npu_dev, RX_LANE_BUSY_VEC_0_15)
+ & ~phy_read(npu_dev, RX_INIT_DONE_VEC_0_15))
+ return PROCEDURE_INPROGRESS;
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_rx_dccal_start(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+
+ /* Save EO step control */
+ val = phy_read(npu_dev, RX_EO_STEP_CNTL_PG);
+ npu_dev->procedure_data = val;
+
+ phy_write(npu_dev, RX_EO_STEP_CNTL_PG,
+ RX_EO_ENABLE_LATCH_OFFSET_CAL
+ | RX_EO_ENABLE_CM_COARSE_CAL);
+
+ val = phy_read(npu_dev, RX_RECAL_ABORT_VEC_0_15);
+ val |= phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RECAL_ABORT_VEC_0_15, val);
+
+ val = phy_read(npu_dev, RX_RUN_LANE_VEC_0_15);
+ val |= phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RUN_LANE_VEC_0_15, val);
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_rx_dccal_complete(struct npu_dev *npu_dev)
+{
+ /* Poll for completion on relevant lanes */
+ if ((phy_read(npu_dev, RX_INIT_DONE_VEC_0_15) & phy_lane_mask(npu_dev))
+ != phy_lane_mask(npu_dev))
+ return PROCEDURE_INPROGRESS;
+
+ return PROCEDURE_NEXT;
+}
+
+static uint32_t phy_rx_dccal_fifo_init(struct npu_dev *npu_dev)
+{
+ uint64_t val;
+
+ val = phy_read(npu_dev, RX_RUN_LANE_VEC_0_15);
+ val &= ~phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RUN_LANE_VEC_0_15, val);
+
+ /* Turn off recal abort */
+ val = phy_read(npu_dev, RX_RECAL_ABORT_VEC_0_15);
+ val &= ~phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RECAL_ABORT_VEC_0_15, val);
+
+ /* Restore original settings */
+ phy_write(npu_dev, RX_EO_STEP_CNTL_PG, npu_dev->procedure_data);
+
+ /* FIFO Init */
+ set_lane_reg(npu_dev, TX_MODE2_PL, 0, TX_UNLOAD_CLK_DISABLE);
+ set_lane_reg(npu_dev, TX_CNTL_STAT2, TX_FIFO_INIT, TX_FIFO_INIT);
+ set_lane_reg(npu_dev, TX_MODE2_PL, TX_UNLOAD_CLK_DISABLE,
+ TX_UNLOAD_CLK_DISABLE);
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_rx_dccal, phy_rx_dccal_start, phy_rx_dccal_complete,
+ phy_rx_dccal_fifo_init);
+
+static uint32_t phy_rx_training(struct npu_dev *npu_dev)
+{
+ uint16_t val;
+
+ if (!npu_dev->procedure_data) {
+ val = phy_read(npu_dev, RX_RUN_LANE_VEC_0_15);
+ val |= phy_lane_mask(npu_dev);
+ phy_write(npu_dev, RX_RUN_LANE_VEC_0_15, val);
+ }
+
+ npu_dev->procedure_data++;
+ if (npu_dev->procedure_data >= 1000000)
+ return PROCEDURE_COMPLETE | PROCEDURE_FAILED;
+
+ val = phy_read(npu_dev, RX_RUN_LANE_VEC_0_15);
+ if ((val & phy_lane_mask(npu_dev)) != phy_lane_mask(npu_dev))
+ return PROCEDURE_INPROGRESS;
+
+ return PROCEDURE_COMPLETE;
+}
+DEFINE_PROCEDURE(phy_rx_training);
+
+static struct procedure *npu_procedures[] = {
+ &procedure_stop,
+ &procedure_nop,
+ NULL,
+ NULL,
+ &procedure_phy_reset,
+ &procedure_phy_tx_zcal,
+ &procedure_phy_rx_dccal,
+ &procedure_phy_enable_tx_rxcal,
+ &procedure_phy_disable_tx_rxcal,
+ &procedure_phy_rx_training,
+ &procedure_reset_npu_dl,
+
+ /* Placeholders for the pre-terminate and terminate procedures */
+ &procedure_nop,
+ &procedure_nop};
+
+/* Run a procedure step(s) and return status */
+static uint32_t get_procedure_status(struct npu_dev *dev)
+{
+ uint32_t result;
+ uint16_t procedure = dev->procedure_number;
+ uint16_t step = dev->procedure_step;
+ const char *name = npu_procedures[procedure]->name;
+
+ do {
+ result = npu_procedures[procedure]->steps[step](dev);
+
+ if (result & PROCEDURE_NEXT) {
+ step++;
+ NPUDEVINF(dev, "Running procedure %s step %d\n", name, step);
+ }
+ } while (result & PROCEDURE_NEXT);
+
+ dev->procedure_step = step;
+
+ if (result & PROCEDURE_COMPLETE)
+ NPUDEVINF(dev, "Procedure %s complete\n", name);
+ else if (mftb() > dev->procedure_tb + msecs_to_tb(100)) {
+ NPUDEVINF(dev, "Procedure %s timed out\n", name);
+ result = PROCEDURE_COMPLETE | PROCEDURE_FAILED;
+ }
+
+ /* Mask off internal state bits */
+ dev->procedure_status = result & PROCEDURE_STATUS_MASK;
+
+ return dev->procedure_status;
+}
+
+int64_t npu_dev_procedure_read(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t *data)
+{
+ struct npu_dev *dev = trap->dev;
+ int64_t rc = OPAL_SUCCESS;
+
+ if (size != 4) {
+ /* Short config reads are not supported */
+ NPUDEVERR(dev, "Short read of procedure register\n");
+ return OPAL_PARAMETER;
+ }
+
+ offset -= trap->start;
+ *data = 0;
+
+ switch (offset) {
+ case 0:
+ /* Only run the procedure if not already complete */
+ if (dev->procedure_status & PROCEDURE_COMPLETE)
+ *data = dev->procedure_status;
+ else
+ *data = get_procedure_status(dev);
+
+ break;
+
+ case 4:
+ *data = dev->procedure_number;
+ break;
+
+ default:
+ NPUDEVERR(dev, "Invalid vendor specific offset 0x%08x\n",
+ offset);
+ rc = OPAL_PARAMETER;
+ }
+
+ return rc;
+}
+
+int64_t npu_dev_procedure_write(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data)
+{
+ struct npu_dev *dev = trap->dev;
+ const char *name;
+ int64_t rc = OPAL_SUCCESS;
+
+ if (size != 4) {
+ /* Short config writes are not supported */
+ NPUDEVERR(dev, "Short read of procedure register\n");
+ return OPAL_PARAMETER;
+ }
+
+ offset -= trap->start;
+
+ switch (offset) {
+ case 0:
+ /* We ignore writes to the status register */
+ NPUDEVINF(dev, "Ignoring writes to status register\n");
+ break;
+
+ case 4:
+ if (data >= ARRAY_SIZE(npu_procedures) ||
+ !npu_procedures[data]) {
+ NPUDEVINF(dev, "Unsupported procedure number %d\n", data);
+ dev->procedure_status = PROCEDURE_COMPLETE
+ | PROCEDURE_UNSUPPORTED;
+ break;
+ }
+
+ name = npu_procedures[data]->name;
+ if (dev->procedure_number == data
+ && !(dev->procedure_status & PROCEDURE_COMPLETE))
+ NPUDEVINF(dev, "Restarting procuedure %s\n", name);
+ else
+ NPUDEVINF(dev, "Starting procedure %s\n", name);
+
+ dev->procedure_status = PROCEDURE_INPROGRESS;
+ dev->procedure_number = data;
+ dev->procedure_step = 0;
+ dev->procedure_data = 0;
+ dev->procedure_tb = mftb();
+ break;
+
+ default:
+ NPUDEVINF(dev, "Invalid vendor specific offset 0x%08x\n", offset);
+ rc = OPAL_PARAMETER;
+ }
+
+ return rc;
+}
diff --git a/hw/npu.c b/hw/npu.c
new file mode 100644
index 0000000..a3898b1
--- /dev/null
+++ b/hw/npu.c
@@ -0,0 +1,1825 @@
+/* Copyright 2013-2015 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <io.h>
+#include <timebase.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <interrupts.h>
+#include <opal.h>
+#include <opal-api.h>
+#include <cpu.h>
+#include <device.h>
+#include <ccan/str/str.h>
+#include <ccan/array_size/array_size.h>
+#include <affinity.h>
+#include <npu-regs.h>
+#include <npu.h>
+#include <lock.h>
+#include <xscom.h>
+
+/*
+ * Terminology:
+ *
+ * Brick - A group of either 8 TX or 8 RX lanes
+ * Link - A group of 8 TX and 8 RX lanes
+ *
+ * Each link is represented in system software as an emulated PCI
+ * device. Garrison has two chips each with 4 links, therefore there
+ * are 8 emulated PCI devices in total.
+ *
+ * +----------------------------------------------------------------+
+ * | PBCQ3 (SCOM Base Address 0x2012c00) |
+ * | PHB3 (SCOM Base Address 0x9012c00) |
+ * +----------------------------------------------------------------+
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * +----------------------------------------------------------------+
+ * | PCIe x8 |
+ * +----------------------------------------------------------------+
+ * | GPU0 |
+ * +--------------------------------+-------------------------------+
+ * | NV Link 1 | NV Link 0 |
+ * +---------------+----------------+---------------+---------------+
+ * | RX | TX | RX | TX |
+ * +---------------+----------------+---------------+---------------+
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * +---------------+----------------+---------------+---------------+
+ * | TX | RX | TX | RX |
+ * +---------------+----------------+---------------+---------------+
+ * | Lanes [0:7] PHY 0 Lanes [8:15] |
+ * | SCOM Base Address 0x8000080008010c3f |
+ * +--------------------------------+-------------------------------+
+ * | Link 0 NDL/NTL | Link 1 NTL/NDL |
+ * | SCOM Base Address 0x8013c00 | SCOM Base Address 0x8013c40 |
+ * +--------------------------------+-------------------------------+
+ * | |
+ * | Address Translation/AT (shared for all links) |
+ * | SCOM Base Address 0x8013d80 |
+ * | |
+ * +--------------------------------+-------------------------------+
+ * | Link 3 NDL/NTL | Link 4 NTL/NDL |
+ * | SCOM Base Address 0x8013d00 | SCOM Base Address 0x8013d40 |
+ * +--------------------------------+-------------------------------+
+ * | Lanes [8:15] PHY 1 Lanes [0:7] |
+ * | SCOM Base Address 0x8000080008010c7f |
+ * +---------------+----------------+---------------+---------------+
+ * | TX | RX | TX | RX |
+ * +---------------+----------------+---------------+---------------+
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * |||||||| |||||||| |||||||| ||||||||
+ * +---------------+----------------+---------------+---------------+
+ * | RX | TX | RX | TX |
+ * +---------------+----------------+---------------+---------------+
+ * | NV Link 2 | NV Link 3 |
+ * +--------------------------------+-------------------------------+
+ * | GPU1 |
+ * +----------------------------------------------------------------+
+ * | PCIe x8 |
+ * +----------------------------------------------------------------+
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * |||||||| ||||||||
+ * +----------------------------------------------------------------+
+ * | PHB2 (SCOM Base Address 0x9012800) |
+ * | PBCQ2 (SCOM Base Address 0x2012800) |
+ * +----------------------------------------------------------------+
+ *
+ */
+
+static struct npu_dev_cap *npu_dev_find_capability(struct npu_dev *dev,
+ uint16_t id);
+
+#define OPAL_NPU_VERSION 0x02
+
+#define PCIE_CAP_START 0x40
+#define PCIE_CAP_END 0x80
+#define VENDOR_CAP_START 0x80
+#define VENDOR_CAP_END 0x90
+
+#define VENDOR_CAP_PCI_DEV_OFFSET 0x0d
+
+/* PCI config raw accessors */
+#define NPU_DEV_CFG_NORMAL_RD(d, o, s, v) \
+ npu_dev_cfg_read_raw(d, NPU_DEV_CFG_NORMAL, o, s, v)
+#define NPU_DEV_CFG_NORMAL_WR(d, o, s, v) \
+ npu_dev_cfg_write_raw(d, NPU_DEV_CFG_NORMAL, o, s, v)
+#define NPU_DEV_CFG_RDONLY_RD(d, o, s, v) \
+ npu_dev_cfg_read_raw(d, NPU_DEV_CFG_RDONLY, o, s, v)
+#define NPU_DEV_CFG_RDONLY_WR(d, o, s, v) \
+ npu_dev_cfg_write_raw(d, NPU_DEV_CFG_RDONLY, o, s, v)
+#define NPU_DEV_CFG_W1CLR_RD(d, o, s, v) \
+ npu_dev_cfg_read_raw(d, NPU_DEV_CFG_W1CLR, o, s, v)
+#define NPU_DEV_CFG_W1CLR_WR(d, o, s, v) \
+ npu_dev_cfg_write_raw(d, NPU_DEV_CFG_W1CLR, o, s, v)
+
+#define NPU_DEV_CFG_INIT(d, o, s, v, ro, w1) \
+ do { \
+ NPU_DEV_CFG_NORMAL_WR(d, o, s, v); \
+ NPU_DEV_CFG_RDONLY_WR(d, o, s, ro); \
+ NPU_DEV_CFG_W1CLR_WR(d, o, s, w1); \
+ } while(0)
+
+#define NPU_DEV_CFG_INIT_RO(d, o, s, v) \
+ NPU_DEV_CFG_INIT(d, o, s, v, 0xffffffff, 0)
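+
+/* NPU_DEV_CFG_INIT seeds all three images of a config register: the
+ * value returned on normal reads, the mask of read-only bits and the
+ * mask of write-1-to-clear bits. NPU_DEV_CFG_INIT_RO is the common
+ * case of a register that software cannot modify at all. */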
+
+static void npu_dev_cfg_read_raw(struct npu_dev *dev,
+ uint32_t index,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t *val)
+{
+ uint8_t *pcfg = dev->config[index];
+ uint32_t r, t, i;
+
+ r = 0;
+ for (i = 0; i < size; i++) {
+ t = pcfg[offset + i];
+ r |= (t << (i * 8));
+ }
+
+ *val = r;
+}
+
+static void npu_dev_cfg_write_raw(struct npu_dev *dev,
+ uint32_t index,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t val)
+{
+ uint8_t *pcfg = dev->config[index];
+ uint32_t i;
+
+ for (i = offset; i < (offset + size); i++) {
+ pcfg[i] = val;
+ val = (val >> 8);
+ }
+}
+
+/* Returns the scom base for the given link index */
+static uint64_t npu_link_scom_base(struct dt_node *dn, uint32_t scom_base,
+ int index)
+{
+ struct dt_node *link;
+ uint32_t link_index;
+ char namebuf[32];
+
+ snprintf(namebuf, sizeof(namebuf), "link@%x", index);
+ link = dt_find_by_name(dn, namebuf);
+ assert(link);
+ link_index = dt_prop_get_u32(link, "ibm,npu-link-index");
+ return scom_base + (link_index * NPU_LINK_SIZE);
+}
+
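+/* The BAR size field encodes the window size as (64KB << field). */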
+static uint64_t get_bar_size(uint64_t bar)
+{
+ return (1 << GETFIELD(NX_MMIO_BAR_SIZE, bar)) * 0x10000;
+}
+
+static void npu_lock(struct phb *phb)
+{
+ struct npu *p = phb_to_npu(phb);
+
+ lock(&p->lock);
+}
+
+static void npu_unlock(struct phb *phb)
+{
+ struct npu *p = phb_to_npu(phb);
+
+ unlock(&p->lock);
+}
+
+/* Update the changes of the device BAR to link BARs */
+static void npu_dev_bar_update(uint32_t gcid, struct npu_dev_bar *bar,
+ bool enable)
+{
+ uint64_t val;
+
+ if (!bar->xscom)
+ return;
+
+ val = bar->base;
+ val = SETFIELD(NX_MMIO_BAR_SIZE, val, ilog2(bar->size / 0x10000));
+ if (enable)
+ val |= NX_MMIO_BAR_ENABLE;
+ xscom_write(gcid, bar->xscom, val);
+}
+
+/* Trap for PCI command (0x4) to enable or disable device's BARs */
+static int64_t npu_dev_cfg_write_cmd(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data)
+{
+ struct npu_dev *dev = trap->dev;
+ bool enable;
+
+ if (offset != PCI_CFG_CMD)
+ return OPAL_PARAMETER;
+ if (size != 1 && size != 2 && size != 4)
+ return OPAL_PARAMETER;
+
+ /* Update the device BARs; the link BARs will be synchronized
+ * with the hardware automatically.
+ */
+ enable = !!(data & PCI_CFG_CMD_MEM_EN);
+ npu_dev_bar_update(dev->npu->chip_id, &dev->bar, enable);
+
+ /* Normal path to update PCI config buffer */
+ return OPAL_PARAMETER;
+}
+
+/*
+ * Trap for memory BARs: 0xFF's should be written to BAR register
+ * prior to getting its size.
+ */
+static int64_t npu_dev_cfg_read_bar(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t *data)
+{
+ struct npu_dev_bar *bar = trap->data;
+
+ /* Revert to normal path if we weren't trapped for BAR size */
+ if (!bar->trapped)
+ return OPAL_PARAMETER;
+
+ if (offset != trap->start &&
+ offset != trap->start + 4)
+ return OPAL_PARAMETER;
+ if (size != 4)
+ return OPAL_PARAMETER;
+
+ bar->trapped = false;
+ *data = bar->bar_sz;
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_dev_cfg_write_bar(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data)
+{
+ struct npu_dev_bar *bar = trap->data;
+ struct npu_dev *dev = container_of(bar, struct npu_dev, bar);
+ uint32_t pci_cmd;
+
+ if (offset != trap->start &&
+ offset != trap->start + 4)
+ return OPAL_PARAMETER;
+ if (size != 4)
+ return OPAL_PARAMETER;
+
+ /* Return BAR size on next read */
+ if (data == 0xffffffff) {
+ bar->trapped = true;
+ if (offset == trap->start)
+ bar->bar_sz = (bar->size & 0xffffffff);
+ else
+ bar->bar_sz = (bar->size >> 32);
+
+ return OPAL_SUCCESS;
+ }
+
+ /* Update BAR base address */
+ if (offset == trap->start) {
+ bar->base &= 0xffffffff00000000;
+ bar->base |= (data & 0xfffffff0);
+ } else {
+ bar->base &= 0x00000000ffffffff;
+ bar->base |= ((uint64_t)data << 32);
+
+ NPU_DEV_CFG_NORMAL_RD(dev, PCI_CFG_CMD, 4, &pci_cmd);
+ npu_dev_bar_update(dev->npu->chip_id, bar,
+ !!(pci_cmd & PCI_CFG_CMD_MEM_EN));
+ }
+
+ /* We still depend on the normal path to update the
+ * cached config buffer.
+ */
+ return OPAL_PARAMETER;
+}
+
+static struct npu_dev *bdfn_to_npu_dev(struct npu *p, uint32_t bdfn)
+{
+ int i;
+
+ /* Sanity check */
+ if (bdfn & ~0xff)
+ return NULL;
+
+ for(i = 0; i < p->total_devices; i++) {
+ if (p->devices[i].bdfn == bdfn)
+ return &p->devices[i];
+ }
+
+ return NULL;
+
+}
+
+static struct npu_dev *npu_dev_cfg_check(struct npu *p,
+ uint32_t bdfn,
+ uint32_t offset,
+ uint32_t size)
+{
+ /* Sanity check */
+ if (offset >= NPU_DEV_CFG_SIZE)
+ return NULL;
+ if (offset & (size - 1))
+ return NULL;
+
+ return bdfn_to_npu_dev(p, bdfn);
+}
+
+static struct npu_dev_trap *npu_dev_trap_check(struct npu_dev *dev,
+ uint32_t offset,
+ uint32_t size,
+ bool read)
+{
+ struct npu_dev_trap *trap;
+
+ list_for_each(&dev->traps, trap, link) {
+ if (read && !trap->read)
+ continue;
+ if (!read && !trap->write)
+ continue;
+
+ /* If the requested region overlaps the one specified
+ * by the trap, pick the trap and let it handle the
+ * request.
+ */
+ if (offset <= trap->end &&
+ (offset + size - 1) >= trap->start)
+ return trap;
+ }
+
+ return NULL;
+}
+
+static int64_t _npu_dev_cfg_read(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t *data,
+ size_t size)
+{
+ struct npu *p = phb_to_npu(phb);
+ struct npu_dev *dev;
+ struct npu_dev_trap *trap;
+ int64_t ret;
+
+ /* Data returned upon errors */
+ *data = 0xffffffff;
+
+ /* If fenced, we want to return all 1s, so we're done. */
+ if (p->fenced)
+ return OPAL_SUCCESS;
+
+ /* Retrieve NPU device */
+ dev = npu_dev_cfg_check(p, bdfn, offset, size);
+ if (!dev)
+ return OPAL_PARAMETER;
+
+ /* Retrieve trap */
+ trap = npu_dev_trap_check(dev, offset, size, true);
+ if (trap) {
+ ret = trap->read(trap, offset,
+ size, (uint32_t *)data);
+ if (ret == OPAL_SUCCESS)
+ return ret;
+ }
+
+ NPU_DEV_CFG_NORMAL_RD(dev, offset, size, data);
+
+ return OPAL_SUCCESS;
+}
+
+#define NPU_DEV_CFG_READ(size, type) \
+static int64_t npu_dev_cfg_read##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type *data) \
+{ \
+ int64_t rc; \
+ uint32_t val; \
+ \
+ /* Data returned upon errors */ \
+ rc = _npu_dev_cfg_read(phb, bdfn, offset, &val, sizeof(*data)); \
+ *data = (type)val; \
+ return rc; \
+}
+
+static int64_t _npu_dev_cfg_write(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t data,
+ size_t size)
+{
+ struct npu *p = phb_to_npu(phb);
+ struct npu_dev *dev;
+ struct npu_dev_trap *trap;
+ uint32_t val, v, r, c, i;
+ int64_t ret;
+
+ /* Retrieve NPU device */
+ dev = npu_dev_cfg_check(p, bdfn, offset, size);
+ if (!dev)
+ return OPAL_PARAMETER;
+
+ /* Retrieve trap */
+ trap = npu_dev_trap_check(dev, offset, size, false);
+ if (trap) {
+ ret = trap->write(trap, offset,
+ size, (uint32_t)data);
+ if (ret == OPAL_SUCCESS)
+ return ret;
+ }
+
+ /* Handle read-only and W1C bits */
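+ /* In each byte, bit positions flagged read-only keep their cached
+ * value regardless of what was written, and W1C bits that are
+ * currently set are cleared when written with 1. */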
+ val = data;
+ for (i = 0; i < size; i++) {
+ v = dev->config[NPU_DEV_CFG_NORMAL][offset + i];
+ r = dev->config[NPU_DEV_CFG_RDONLY][offset + i];
+ c = dev->config[NPU_DEV_CFG_W1CLR][offset + i];
+
+ /* Drop read-only bits */
+ val &= ~(r << (i * 8));
+ val |= (r & v) << (i * 8);
+
+ /* Drop W1C bits */
+ val &= ~(val & ((c & v) << (i * 8)));
+ }
+
+ NPU_DEV_CFG_NORMAL_WR(dev, offset, size, val);
+ return OPAL_SUCCESS;
+}
+
+#define NPU_DEV_CFG_WRITE(size, type) \
+static int64_t npu_dev_cfg_write##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type data) \
+{ \
+ return _npu_dev_cfg_write(phb, bdfn, offset, \
+ data, sizeof(data)); \
+}
+
+NPU_DEV_CFG_READ(8, u8)
+NPU_DEV_CFG_READ(16, u16)
+NPU_DEV_CFG_READ(32, u32)
+NPU_DEV_CFG_WRITE(8, u8)
+NPU_DEV_CFG_WRITE(16, u16)
+NPU_DEV_CFG_WRITE(32, u32)
+
+/*
+ * Add calls to trap reads and writes to an NPU config space.
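+ *
+ * A handler that returns OPAL_SUCCESS has consumed the access; any
+ * other return code makes the caller fall back to the cached config
+ * space image.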
+ */
+static void npu_dev_add_cfg_trap(struct npu_dev *dev, uint32_t start,
+ uint32_t size, void *data,
+ int64_t (*read)(struct npu_dev_trap *,
+ uint32_t,
+ uint32_t,
+ uint32_t *),
+ int64_t (*write)(struct npu_dev_trap *,
+ uint32_t,
+ uint32_t,
+ uint32_t))
+{
+ struct npu_dev_trap *trap;
+
+ trap = zalloc(sizeof(struct npu_dev_trap));
+ assert(trap);
+ trap->dev = dev;
+ trap->start = start;
+ trap->end = start + size - 1;
+ trap->read = read;
+ trap->write = write;
+ trap->data = data;
+ list_add_tail(&dev->traps, &trap->link);
+}
+
+static int __npu_dev_bind_pci_dev(struct phb *phb __unused,
+ struct pci_device *pd,
+ void *data)
+{
+ struct npu_dev *dev = data;
+ struct dt_node *pci_dt_node;
+ uint32_t npu_npcq_phandle;
+
+ /* Ignore non-nvidia PCI devices */
+ if ((pd->vdid & 0xffff) != 0x10de)
+ return 0;
+
+ /* Find the PCI devices pbcq */
+ for (pci_dt_node = pd->dn->parent;
+ pci_dt_node && !dt_find_property(pci_dt_node, "ibm,pbcq");
+ pci_dt_node = pci_dt_node->parent);
+
+ if (!pci_dt_node)
+ return 0;
+
+ npu_npcq_phandle = dt_prop_get_u32(dev->dt_node, "ibm,npu-pbcq");
+
+ if (dt_prop_get_u32(pci_dt_node, "ibm,pbcq") == npu_npcq_phandle &&
+ (pd->vdid & 0xffff) == 0x10de)
+ return 1;
+
+ return 0;
+}
+
+static void npu_dev_bind_pci_dev(struct npu_dev *dev)
+{
+ struct phb *phb;
+ uint32_t i;
+
+ if (dev->pd)
+ return;
+
+ for (i = 0; i < 64; i++) {
+ if (dev->npu->phb.opal_id == i)
+ continue;
+
+ phb = pci_get_phb(i);
+ if (!phb)
+ continue;
+
+ dev->pd = pci_walk_dev(phb, __npu_dev_bind_pci_dev, dev);
+ if (dev->pd) {
+ dev->phb = phb;
+ /* Found the device, set the bit in config space */
+ NPU_DEV_CFG_INIT_RO(dev, VENDOR_CAP_START +
+ VENDOR_CAP_PCI_DEV_OFFSET, 1, 0x01);
+ return;
+ }
+ }
+
+ prlog(PR_ERR, "%s: NPU device %04x:00:%02x.0 not binding to PCI device\n",
+ __func__, dev->npu->phb.opal_id, dev->index);
+}
+
+static struct lock pci_npu_phandle_lock = LOCK_UNLOCKED;
+
+/* Appends an NPU phandle to the given PCI device node's ibm,npu
+ * property. */
+static void npu_append_pci_phandle(struct dt_node *dn, u32 phandle)
+{
+ uint32_t *npu_phandles;
+ struct dt_property *pci_npu_phandle_prop;
+ size_t prop_len;
+
+ /* Use a lock to make sure no one else has a reference to an
+ * ibm,npu property (this assumes this is the only function
+ * that holds a reference to it). */
+ lock(&pci_npu_phandle_lock);
+
+ /* This function shouldn't be called unless ibm,npu exists */
+ pci_npu_phandle_prop = (struct dt_property *)
+ dt_require_property(dn, "ibm,npu", -1);
+
+ /* Need to append to the properties */
+ prop_len = pci_npu_phandle_prop->len;
+ prop_len += sizeof(*npu_phandles);
+ dt_resize_property(&pci_npu_phandle_prop, prop_len);
+ pci_npu_phandle_prop->len = prop_len;
+
+ npu_phandles = (uint32_t *) pci_npu_phandle_prop->prop;
+ npu_phandles[prop_len/sizeof(*npu_phandles) - 1] = phandle;
+ unlock(&pci_npu_phandle_lock);
+}
+
+static void npu_dn_fixup(struct phb *phb, struct pci_device *pd)
+{
+ struct npu *p = phb_to_npu(phb);
+ struct npu_dev *dev;
+
+ dev = bdfn_to_npu_dev(p, pd->bdfn);
+ assert(dev);
+
+ if (dev->phb || dev->pd)
+ return;
+
+ /* Bind the emulated PCI device with the real one, which can't
+ * be done until the PCI devices are populated. Once the real
+ * PCI device is identified, we also need to fix the device-tree
+ * for it.
+ */
+ npu_dev_bind_pci_dev(dev);
+ if (dev->phb && dev->pd && dev->pd->dn) {
+ if (dt_find_property(dev->pd->dn, "ibm,npu"))
+ npu_append_pci_phandle(dev->pd->dn, pd->dn->phandle);
+ else
+ dt_add_property_cells(dev->pd->dn, "ibm,npu", pd->dn->phandle);
+
+ dt_add_property_cells(pd->dn, "ibm,gpu", dev->pd->dn->phandle);
+ }
+}
+
+static void npu_ioda_init(struct npu *p)
+{
+ uint64_t *data64;
+ uint32_t i;
+
+ /* LXIVT - Disable all LSIs */
+ for (i = 0; i < ARRAY_SIZE(p->lxive_cache); i++) {
+ data64 = &p->lxive_cache[i];
+ *data64 = SETFIELD(NPU_IODA_LXIVT_PRIORITY, 0ul, 0xff);
+ *data64 = SETFIELD(NPU_IODA_LXIVT_SERVER, *data64, 0);
+ }
+
+ /* PCT - Reset to reserved PE# */
+ for (i = 0; i < ARRAY_SIZE(p->pce_cache); i++) {
+ data64 = &p->pce_cache[i];
+ *data64 = SETFIELD(NPU_IODA_PCT_PE, 0ul, NPU_NUM_OF_PES);
+ *data64 |= NPU_IODA_PCT_LINK_ENABLED;
+ }
+
+ /* Clear TVT */
+ memset(p->tve_cache, 0, sizeof(p->tve_cache));
+}
+
+static int64_t npu_ioda_reset(struct phb *phb, bool purge)
+{
+ struct npu *p = phb_to_npu(phb);
+ uint32_t i;
+
+ if (purge) {
+ NPUDBG(p, "Purging all IODA tables...\n");
+ npu_ioda_init(p);
+ }
+
+ /* LIST */
+ npu_ioda_sel(p, NPU_IODA_TBL_LIST, 0, true);
+ for (i = 0; i < 8; i++)
+ out_be64(p->at_regs + NPU_IODA_DATA0, 0x1);
+
+ /* LIXVT */
+ npu_ioda_sel(p, NPU_IODA_TBL_LXIVT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->lxive_cache); i++)
+ out_be64(p->at_regs + NPU_IODA_DATA0, p->lxive_cache[i]);
+
+ /* PCT */
+ npu_ioda_sel(p, NPU_IODA_TBL_PCT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->pce_cache); i++)
+ out_be64(p->at_regs + NPU_IODA_DATA0, p->pce_cache[i]);
+
+ /* TVT */
+ npu_ioda_sel(p, NPU_IODA_TBL_TVT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->tve_cache); i++)
+ out_be64(p->at_regs + NPU_IODA_DATA0, p->tve_cache[i]);
+
+ return OPAL_SUCCESS;
+}
+
+static int npu_isn_valid(struct npu *p, uint32_t isn)
+{
+ if (p->chip_id != p8_irq_to_chip(isn) || p->index != 0 ||
+ NPU_IRQ_NUM(isn) < NPU_LSI_IRQ_MIN ||
+ NPU_IRQ_NUM(isn) > NPU_LSI_IRQ_MAX) {
+ NPUERR(p, "isn 0x%x not valid for this NPU\n", isn);
+ return false;
+ }
+
+ return true;
+}
+
+static int64_t npu_lsi_get_xive(void *data,
+ uint32_t isn,
+ uint16_t *server,
+ uint8_t *prio)
+{
+ struct npu *p = data;
+ uint32_t irq = NPU_IRQ_NUM(isn);
+ uint64_t lxive;
+
+ if (!npu_isn_valid(p, isn))
+ return OPAL_PARAMETER;
+
+ /* The content is fetched from the cache, which requires
+ * that the cache has been initialized with the default
+ * values.
+ */
+ irq -= NPU_LSI_IRQ_MIN;
+ lxive = p->lxive_cache[irq];
+ *server = GETFIELD(NPU_IODA_LXIVT_SERVER, lxive);
+ *prio = GETFIELD(NPU_IODA_LXIVT_PRIORITY, lxive);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_lsi_set_xive(void *data,
+ uint32_t isn,
+ uint16_t server,
+ uint8_t prio)
+{
+ struct npu *p = data;
+ uint32_t irq = NPU_IRQ_NUM(isn);
+ uint64_t lxive;
+
+ if (!npu_isn_valid(p, isn))
+ return OPAL_PARAMETER;
+
+ /* Figure out LXIVT entry */
+ lxive = SETFIELD(NPU_IODA_LXIVT_SERVER, 0ul, server);
+ lxive = SETFIELD(NPU_IODA_LXIVT_PRIORITY, lxive, prio);
+
+ /* Cache LXIVT entry */
+ irq -= NPU_LSI_IRQ_MIN;
+ p->lxive_cache[irq] = lxive;
+
+ /* Update to LXIVT entry */
+ npu_ioda_sel(p, NPU_IODA_TBL_LXIVT, irq, false);
+ lxive = in_be64(p->at_regs + NPU_IODA_DATA0);
+ lxive = SETFIELD(NPU_IODA_LXIVT_SERVER, lxive, server);
+ lxive = SETFIELD(NPU_IODA_LXIVT_PRIORITY, lxive, prio);
+ out_be64(p->at_regs + NPU_IODA_DATA0, lxive);
+
+ return OPAL_SUCCESS;
+}
+
+static void npu_err_interrupt(void *data, uint32_t isn)
+{
+ struct npu *p = data;
+ uint32_t irq = NPU_IRQ_NUM(isn);
+
+ if (!npu_isn_valid(p, isn))
+ return;
+
+ /* There are 4 LSIs used for error reporting: 4/5 for data
+ * link errors and 6/7 for frozen PE detection.
+ */
+ irq -= NPU_LSI_IRQ_MIN;
+ switch (irq) {
+ case 4 ... 5:
+ prerror("Invalid NPU error interrupt received\n");
+ break;
+ case 6 ... 7:
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR,
+ OPAL_EVENT_PCI_ERROR);
+ }
+}
+
+/* LSIs (OS owned) */
+static const struct irq_source_ops npu_lsi_irq_ops = {
+ .get_xive = npu_lsi_get_xive,
+ .set_xive = npu_lsi_set_xive,
+};
+
+/* Error LSIs (skiboot owned) */
+static const struct irq_source_ops npu_err_lsi_irq_ops = {
+ .get_xive = npu_lsi_get_xive,
+ .set_xive = npu_lsi_set_xive,
+ .interrupt = npu_err_interrupt,
+};
+
+static void npu_register_irq(struct npu *p)
+{
+ register_irq_source(&npu_lsi_irq_ops, p,
+ p->base_lsi, 4);
+ register_irq_source(&npu_err_lsi_irq_ops, p,
+ p->base_lsi + 4, 4);
+}
+
+static void npu_hw_init(struct npu *p)
+{
+ /* 3 MMIO setup for AT */
+ out_be64(p->at_regs + NPU_LSI_SOURCE_ID,
+ SETFIELD(NPU_LSI_SRC_ID_BASE, 0ul, 0x7f));
+ out_be64(p->at_regs + NPU_INTREP_TIMER, 0x0ul);
+ npu_ioda_reset(&p->phb, false);
+}
+
+static int64_t npu_map_pe_dma_window_real(struct phb *phb,
+ uint16_t pe_num,
+ uint16_t window_id,
+ uint64_t pci_start_addr,
+ uint64_t pci_mem_size)
+{
+ struct npu *p = phb_to_npu(phb);
+ uint64_t end;
+ uint64_t tve;
+
+ /* Sanity check. Each PE has one corresponding TVE */
+ if (pe_num >= NPU_NUM_OF_PES ||
+ window_id != pe_num)
+ return OPAL_PARAMETER;
+
+ if (pci_mem_size) {
+ /* Enable */
+
+ end = pci_start_addr + pci_mem_size;
+
+ /* We have to be 16M aligned */
+ if ((pci_start_addr & 0x00ffffff) ||
+ (pci_mem_size & 0x00ffffff))
+ return OPAL_PARAMETER;
+
+ /*
+ * It *looks* like this is the max we can support (we need
+ * to verify this). Also we are not checking for rollover,
+ * but then we aren't trying too hard to protect ourselves
+ * against a completely broken OS.
+ */
+ if (end > 0x0003ffffffffffffull)
+ return OPAL_PARAMETER;
+
+ /*
+ * Put start address bits 49:24 into TVE[52:53]||[0:23]
+ * and end address bits 49:24 into TVE[54:55]||[24:47]
+ * and set TVE[51]
+ */
+ tve = (pci_start_addr << 16) & (0xffffffull << 48);
+ tve |= (pci_start_addr >> 38) & (3ull << 10);
+ tve |= (end >> 8) & (0xfffffful << 16);
+ tve |= (end >> 40) & (3ull << 8);
+ tve |= PPC_BIT(51);
+ } else {
+ /* Disable */
+ tve = 0;
+ }
+
+ npu_ioda_sel(p, NPU_IODA_TBL_TVT, window_id, false);
+ out_be64(p->at_regs + NPU_IODA_DATA0, tve);
+ p->tve_cache[window_id] = tve;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_map_pe_dma_window(struct phb *phb,
+ uint16_t pe_num,
+ uint16_t window_id,
+ uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size)
+{
+ struct npu *p = phb_to_npu(phb);
+ uint64_t tts_encoded;
+ uint64_t data64 = 0;
+
+ /* Sanity check. Each PE has one corresponding TVE */
+ if (pe_num >= NPU_NUM_OF_PES ||
+ window_id != pe_num)
+ return OPAL_PARAMETER;
+
+ /* Special condition: a zero TCE table size is used to disable
+ * the TVE.
+ */
+ if (!tce_table_size) {
+ npu_ioda_sel(p, NPU_IODA_TBL_TVT, window_id, false);
+ out_be64(p->at_regs + NPU_IODA_DATA0, 0ul);
+ p->tve_cache[window_id] = 0ul;
+ return OPAL_SUCCESS;
+ }
+
+ /* Additional arguments validation */
+ if (tce_levels < 1 ||
+ tce_levels > 4 ||
+ !is_pow2(tce_table_size) ||
+ tce_table_size < 0x1000)
+ return OPAL_PARAMETER;
+
+ /* TCE table size */
+ data64 = SETFIELD(NPU_IODA_TVT_TTA, 0ul, tce_table_addr >> 12);
+ tts_encoded = ilog2(tce_table_size) - 11;
+ if (tts_encoded > 39)
+ return OPAL_PARAMETER;
+ data64 = SETFIELD(NPU_IODA_TVT_SIZE, data64, tts_encoded);
+
+ /* TCE page size */
+ switch (tce_page_size) {
+ case 0x10000: /* 64K */
+ data64 = SETFIELD(NPU_IODA_TVT_PSIZE, data64, 5);
+ break;
+ case 0x1000000: /* 16M */
+ data64 = SETFIELD(NPU_IODA_TVT_PSIZE, data64, 13);
+ break;
+ case 0x10000000: /* 256M */
+ data64 = SETFIELD(NPU_IODA_TVT_PSIZE, data64, 17);
+ break;
+ case 0x1000: /* 4K */
+ default:
+ data64 = SETFIELD(NPU_IODA_TVT_PSIZE, data64, 1);
+ }
+
+ /* Number of levels */
+ data64 = SETFIELD(NPU_IODA_TVT_LEVELS, data64, tce_levels - 1);
+
+ /* Update to hardware */
+ npu_ioda_sel(p, NPU_IODA_TBL_TVT, window_id, false);
+ out_be64(p->at_regs + NPU_IODA_DATA0, data64);
+ p->tve_cache[window_id] = data64;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_set_pe(struct phb *phb,
+ uint64_t pe_num,
+ uint64_t bdfn,
+ uint8_t bcompare,
+ uint8_t dcompare,
+ uint8_t fcompare,
+ uint8_t action)
+{
+ struct npu *p = phb_to_npu(phb);
+ struct npu_dev *dev;
+ uint32_t link_idx;
+ uint64_t *data64;
+
+ /* Sanity check */
+ if (action != OPAL_MAP_PE &&
+ action != OPAL_UNMAP_PE)
+ return OPAL_PARAMETER;
+ if (pe_num >= NPU_NUM_OF_PES)
+ return OPAL_PARAMETER;
+
+ /* All emulated PCI devices are hooked to the root bus, whose
+ * bus number is zero.
+ */
+ dev = bdfn_to_npu_dev(p, bdfn);
+ if ((bdfn >> 8) || !dev)
+ return OPAL_PARAMETER;
+
+ link_idx = dev->index;
+ dev->pe_num = pe_num;
+
+ /* Separate links will be mapped to different PEs */
+ if (bcompare != OpalPciBusAll ||
+ dcompare != OPAL_COMPARE_RID_DEVICE_NUMBER ||
+ fcompare != OPAL_COMPARE_RID_FUNCTION_NUMBER)
+ return OPAL_UNSUPPORTED;
+
+ /* Map the link to the corresponding PE */
+ data64 = &p->pce_cache[link_idx];
+ if (action == OPAL_MAP_PE)
+ *data64 = SETFIELD(NPU_IODA_PCT_PE, *data64,
+ pe_num);
+ else
+ *data64 = SETFIELD(NPU_IODA_PCT_PE, *data64,
+ NPU_NUM_OF_PES);
+
+ *data64 |= NPU_IODA_PCT_LINK_ENABLED;
+
+ npu_ioda_sel(p, NPU_IODA_TBL_PCT, link_idx, false);
+ out_be64(p->at_regs + NPU_IODA_DATA0, *data64);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_link_state(struct phb *phb __unused)
+{
+ /* As we're emulating all PCI stuff, the link bandwidth
+ * isn't a big deal anyway.
+ */
+ return OPAL_SHPC_LINK_UP_x1;
+}
+
+static int64_t npu_power_state(struct phb *phb __unused)
+{
+ return OPAL_SHPC_POWER_ON;
+}
+
+static int64_t npu_hreset(struct phb *phb __unused)
+{
+ prlog(PR_DEBUG, "NPU: driver should call reset procedure here\n");
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_freset(struct phb *phb __unused)
+{
+ /* FIXME: PHB fundamental reset, which needs to be
+ * figured out later. It's used by EEH recovery
+ * upon fenced AT.
+ */
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_freeze_status(struct phb *phb,
+ uint64_t pe_number __unused,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type __unused,
+ uint16_t *severity __unused,
+ uint64_t *phb_status __unused)
+{
+ /* FIXME: When it's called by the skiboot PCI config accessor,
+ * the PE number is fixed to 0, which is incorrect. We need to
+ * introduce another PHB callback to translate it. For now,
+ * it keeps the skiboot PCI enumeration going.
+ */
+ struct npu *p = phb_to_npu(phb);
+ if (p->fenced)
+ *freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
+ else
+ *freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
+ return OPAL_SUCCESS;
+}
+
+static int64_t npu_eeh_next_error(struct phb *phb,
+ uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type,
+ uint16_t *severity)
+{
+ struct npu *p = phb_to_npu(phb);
+ int i;
+ uint64_t result = 0;
+ *first_frozen_pe = -1;
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+
+ if (p->fenced) {
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ return OPAL_SUCCESS;
+ }
+
+ npu_ioda_sel(p, NPU_IODA_TBL_PESTB, 0, true);
+ for (i = 0; i < NPU_NUM_OF_PES; i++) {
+ result = in_be64(p->at_regs + NPU_IODA_DATA0);
+ if (result > 0) {
+ *first_frozen_pe = i;
+ *pci_error_type = OPAL_EEH_PE_ERROR;
+ *severity = OPAL_EEH_SEV_PE_ER;
+ break;
+ }
+ }
+
+ return OPAL_SUCCESS;
+}
+
+
+/* Sets the NPU to trigger an error when a DMA occurs */
+static int64_t npu_err_inject(struct phb *phb, uint32_t pe_num,
+ uint32_t type, uint32_t func __unused,
+ uint64_t addr __unused, uint64_t mask __unused)
+{
+ struct npu *p = phb_to_npu(phb);
+ struct npu_dev *dev = NULL;
+ int i;
+
+ if (pe_num > NPU_NUM_OF_PES) {
+ prlog(PR_ERR, "NPU: error injection failed, bad PE given\n");
+ return OPAL_PARAMETER;
+ }
+
+ for (i = 0; i < p->total_devices; i++) {
+ if (p->devices[i].pe_num == pe_num) {
+ dev = &p->devices[i];
+ break;
+ }
+ }
+
+ if (!dev) {
+ prlog(PR_ERR, "NPU: couldn't find device with PE %x\n", pe_num);
+ return OPAL_PARAMETER;
+ }
+
+ /* TODO: extend this to conform to OPAL injection standards */
+ if (type > 1) {
+ prlog(PR_ERR, "NPU: invalid error injection type\n");
+ return OPAL_PARAMETER;
+ } else if (type == 1) {
+ /* Emulate fence mode. */
+ p->fenced = true;
+ } else {
+ /* Cause a freeze with an invalid MMIO read. */
+ in_be64((void *)dev->bar.base);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static const struct phb_ops npu_ops = {
+ .lock = npu_lock,
+ .unlock = npu_unlock,
+ .cfg_read8 = npu_dev_cfg_read8,
+ .cfg_read16 = npu_dev_cfg_read16,
+ .cfg_read32 = npu_dev_cfg_read32,
+ .cfg_write8 = npu_dev_cfg_write8,
+ .cfg_write16 = npu_dev_cfg_write16,
+ .cfg_write32 = npu_dev_cfg_write32,
+ .choose_bus = NULL,
+ .device_init = NULL,
+ .device_node_fixup = npu_dn_fixup,
+ .presence_detect = NULL,
+ .ioda_reset = npu_ioda_reset,
+ .papr_errinjct_reset = NULL,
+ .pci_reinit = NULL,
+ .set_phb_mem_window = NULL,
+ .phb_mmio_enable = NULL,
+ .map_pe_mmio_window = NULL,
+ .map_pe_dma_window = npu_map_pe_dma_window,
+ .map_pe_dma_window_real = npu_map_pe_dma_window_real,
+ .pci_msi_eoi = NULL,
+ .set_xive_pe = NULL,
+ .get_msi_32 = NULL,
+ .get_msi_64 = NULL,
+ .set_pe = npu_set_pe,
+ .set_peltv = NULL,
+ .link_state = npu_link_state,
+ .power_state = npu_power_state,
+ .slot_power_off = NULL,
+ .slot_power_on = NULL,
+ .hot_reset = npu_hreset,
+ .fundamental_reset = npu_freset,
+ .complete_reset = NULL,
+ .poll = NULL,
+ .eeh_freeze_status = npu_freeze_status,
+ .eeh_freeze_clear = NULL,
+ .eeh_freeze_set = NULL,
+ .next_error = npu_eeh_next_error,
+ .err_inject = npu_err_inject,
+ .get_diag_data = NULL,
+ .get_diag_data2 = NULL,
+ .set_capi_mode = NULL,
+ .set_capp_recovery = NULL,
+};
+
+static void assign_mmio_bars(uint32_t gcid, uint32_t xscom,
+ struct dt_node *npu_dn, uint64_t mm_win[2])
+{
+ uint64_t mem_start, mem_end;
+ struct npu_dev_bar bar;
+ struct dt_node *link;
+
+ /* Configure BAR selection.
+ *
+ * Currently, each PHY contains 2 links and each link has 2
+ * BARs. The first BAR is assigned to the DLTL region which is
+ * what the kernel uses. The second BAR is assigned to either
+ * the PL or AT region, or is left unassigned. The PL0/PL1/AT
+ * MMIO regions are not exposed to the kernel so we assign
+ * them at the start of the available memory area followed by
+ * the DLTL regions. So we end up with the following memory
+ * map (assuming we're given a memory region starting at
+ * 0x3fff000000000):
+ *
+ * Link#0-BAR#0: NTL/NDL BAR (128KB) - 0x3fff000420000
+ * Link#0-BAR#1: PL0 BAR ( 2MB) - 0x3fff000000000
+ * Link#1-BAR#0: NTL/NDL BAR (128KB) - 0x3fff000440000
+ * Link#1-BAR#1: AT BAR ( 64KB) - 0x3fff000400000
+ * Link#2-BAR#0: NTL/NDL BAR (128KB) - 0x3fff000460000
+ * Link#2-BAR#1: PL1 BAR ( 2MB) - 0x3fff000200000
+ * Link#3-BAR#0: NTL/NDL BAR (128KB) - 0x3fff000480000
+ * Link#3-BAR#1: UNASSIGNED
+ */
+ xscom_write(gcid, xscom + NPU_AT_SCOM_OFFSET + NX_BAR,
+ 0x0211000043500000);
+
+ xscom_read(gcid, npu_link_scom_base(npu_dn, xscom, 0) + NX_MMIO_BAR_0,
+ &mem_start);
+ mem_start = GETFIELD(NX_MMIO_BAR_BASE, mem_start) << 12;
+
+ xscom_read(gcid, npu_link_scom_base(npu_dn, xscom, 5) + NX_MMIO_BAR_0,
+ &mem_end);
+ mem_end = (GETFIELD(NX_MMIO_BAR_BASE, mem_end) << 12) +
+ get_bar_size(mem_end);
+
+ /* PL0 BAR comes first at 0x3fff000000000 */
+ bar.xscom = npu_link_scom_base(npu_dn, xscom, 0) + NX_MMIO_BAR_1;
+ bar.base = mem_start;
+ bar.size = NX_MMIO_PL_SIZE;
+ npu_dev_bar_update(gcid, &bar, true);
+
+ /* PL1 BAR */
+ bar.xscom = npu_link_scom_base(npu_dn, xscom, 4) + NX_MMIO_BAR_1;
+ bar.base += bar.size;
+ bar.size = NX_MMIO_PL_SIZE;
+ npu_dev_bar_update(gcid, &bar, true);
+
+ /* Then the AT BAR */
+ bar.xscom = npu_link_scom_base(npu_dn, xscom, 1) + NX_MMIO_BAR_1;
+ bar.base += bar.size;
+ bar.size = NX_MMIO_AT_SIZE;
+ npu_dev_bar_update(gcid, &bar, true);
+
+ /* Now we configure all the DLTL BARs. These are the ones
+ * actually exposed to the kernel. */
+ mm_win[0] = bar.base + bar.size;
+ dt_for_each_node(npu_dn, link) {
+ uint32_t index;
+
+ index = dt_prop_get_u32(link, "ibm,npu-link-index");
+ bar.xscom = npu_link_scom_base(npu_dn, xscom, index) +
+ NX_MMIO_BAR_0;
+ bar.base += bar.size;
+ bar.size = NX_MMIO_DL_SIZE;
+ bar.base = ALIGN_UP(bar.base, bar.size);
+ npu_dev_bar_update(gcid, &bar, false);
+ }
+ mm_win[1] = (bar.base + bar.size) - mm_win[0];
+
+ /* If we weren't given enough room to set up all the BARs we
+ * require, it's better to crash here than risk creating
+ * overlapping BARs, which will xstop the machine randomly in
+ * the future. */
+ assert(bar.base + bar.size <= mem_end);
+}
+
+/* Probe NPU device node and create PCI root device node
+ * accordingly. The NPU device node should specify the number
+ * of links and the xscom base address used to access them.
+ */
+static void npu_probe_phb(struct dt_node *dn)
+{
+ struct dt_node *np;
+ uint32_t gcid, index, xscom;
+ uint64_t at_bar[2], mm_win[2], val;
+ uint32_t links = 0;
+ char *path;
+
+ /* Retrieve chip id */
+ path = dt_get_path(dn);
+ gcid = dt_get_chip_id(dn);
+ index = dt_prop_get_u32(dn, "ibm,npu-index");
+ dt_for_each_compatible(dn, np, "ibm,npu-link")
+ links++;
+
+ prlog(PR_INFO, "Chip %d Found NPU%d (%d links) at %s\n",
+ gcid, index, links, path);
+ free(path);
+
+ /* Retrieve xscom base addr */
+ xscom = dt_get_address(dn, 0, NULL);
+ prlog(PR_INFO, " XSCOM Base: %08x\n", xscom);
+
+ assign_mmio_bars(gcid, xscom, dn, mm_win);
+
+ /* Retrieve AT BAR */
+ xscom_read(gcid, npu_link_scom_base(dn, xscom, 1) + NX_MMIO_BAR_1,
+ &val);
+ if (!(val & NX_MMIO_BAR_ENABLE)) {
+ prlog(PR_ERR, " AT BAR disabled!\n");
+ return;
+ }
+
+ at_bar[0] = GETFIELD(NX_MMIO_BAR_BASE, val) << 12;
+ at_bar[1] = get_bar_size(val);
+ prlog(PR_INFO, " AT BAR: %016llx (%lldKB)\n",
+ at_bar[0], at_bar[1] / 0x400);
+
+ /* Create PCI root device node */
+ np = dt_new_addr(dt_root, "pciex", at_bar[0]);
+ if (!np) {
+ prlog(PR_ERR, "%s: Cannot create PHB device node\n",
+ __func__);
+ return;
+ }
+
+ dt_add_property_strings(np, "compatible",
+ "ibm,power8-npu-pciex", "ibm,ioda2-npu-phb");
+ dt_add_property_strings(np, "device_type", "pciex");
+ dt_add_property(np, "reg", at_bar, sizeof(at_bar));
+
+ dt_add_property_cells(np, "ibm,phb-index", index);
+ dt_add_property_cells(np, "ibm,chip-id", gcid);
+ dt_add_property_cells(np, "ibm,xscom-base", xscom);
+ dt_add_property_cells(np, "ibm,npcq", dn->phandle);
+ dt_add_property_cells(np, "ibm,links", links);
+ dt_add_property(np, "ibm,mmio-window", mm_win, sizeof(mm_win));
+}
+
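+/* Layout of the vendor specific capability, relative to its start:
+ * +0 capability ID, +1 next capability pointer, +2 length, +3 version,
+ * +4 procedure status (trapped; read to run/poll the procedure),
+ * +8 procedure control (trapped; write a procedure number to start),
+ * +12 link index, +13 set once the device is bound to a real PCI
+ * device. */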
+static void npu_dev_populate_vendor_cap(struct npu_dev_cap *cap)
+{
+ struct npu_dev *dev = cap->dev;
+ uint32_t offset = cap->start;
+ uint8_t val;
+
+ /* Add length and version information */
+ val = cap->end - cap->start;
+ NPU_DEV_CFG_INIT_RO(dev, offset + 2, 1, val);
+ NPU_DEV_CFG_INIT_RO(dev, offset + 3, 1, OPAL_NPU_VERSION);
+ offset += 4;
+
+ /* Defaults when the trap can't handle the read/write (e.g. due
+ * to reading/writing less than 4 bytes). */
+ val = 0x0;
+ NPU_DEV_CFG_INIT_RO(dev, offset, 4, val);
+ NPU_DEV_CFG_INIT_RO(dev, offset + 4, 4, val);
+
+ /* Create a trap for AT/PL procedures */
+ npu_dev_add_cfg_trap(dev, offset, 8, NULL, npu_dev_procedure_read,
+ npu_dev_procedure_write);
+ offset += 8;
+
+ NPU_DEV_CFG_INIT_RO(dev, offset, 1, dev->index);
+}
+
+static void npu_dev_populate_pcie_cap(struct npu_dev_cap *cap)
+{
+ struct npu_dev *dev = cap->dev;
+ uint32_t base = cap->start;
+ uint32_t val;
+
+ /* Sanity check on capability ID */
+ if (cap->id != PCI_CFG_CAP_ID_EXP) {
+ prlog(PR_NOTICE, "%s: Invalid capability ID %d (%d)\n",
+ __func__, cap->id, PCI_CFG_CAP_ID_EXP);
+ return;
+ }
+
+ /* Sanity check on spanned registers */
+ if ((cap->end - cap->start) < PCIE_CAP_START) {
+ prlog(PR_NOTICE, "%s: Invalid reg region [%x, %x] for cap %d\n",
+ __func__, cap->start, cap->end, cap->id);
+ return;
+ }
+
+ /* 0x00 - ID/PCIE capability */
+ val = cap->id;
+ val |= ((0x2 << 16) | (PCIE_TYPE_ENDPOINT << 20));
+ NPU_DEV_CFG_INIT_RO(dev, base, 4, val);
+
+ /* 0x04 - Device capability
+ *
+ * We should support FLR. Otherwise, it might have
+ * problems passing it through to userland via the Linux
+ * VFIO infrastructure.
+ */
+ val = ((PCIE_MPSS_128) |
+ (PCIE_PHANTOM_NONE << 3) |
+ (PCIE_L0SL_MAX_NO_LIMIT << 6) |
+ (PCIE_L1L_MAX_NO_LIMIT << 9) |
+ (PCICAP_EXP_DEVCAP_FUNC_RESET));
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_DEVCAP, 4, val);
+
+ /* 0x08 - Device control and status */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_DEVCTL, 4, 0x00002810,
+ 0xffff0000, 0x000f0000);
+
+ /* 0x0c - Link capability */
+ val = (PCIE_LSPEED_VECBIT_2 | (PCIE_LWIDTH_1X << 4));
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_LCAP, 4, val);
+
+ /* 0x10 - Link control and status */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_LCTL, 4, 0x00130000,
+ 0xfffff000, 0xc0000000);
+
+ /* 0x14 - Slot capability */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_SLOTCAP, 4, 0x00000000);
+
+ /* 0x18 - Slot control and status */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_SLOTCTL, 4, 0x00000000);
+
+ /* 0x1c - Root control and capability */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_RC, 4, 0x00000000,
+ 0xffffffe0, 0x00000000);
+
+ /* 0x20 - Root status */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_RSTAT, 4, 0x00000000,
+ 0xffffffff, 0x00010000);
+
+ /* 0x24 - Device capability 2 */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCIECAP_EXP_DCAP2, 4, 0x00000000);
+
+ /* 0x28 - Device Control and status 2 */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_DCTL2, 4, 0x00070000,
+ 0xffff0000, 0x00000000);
+
+ /* 0x2c - Link capability 2 */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_LCAP2, 4, 0x00000007);
+
+ /* 0x30 - Link control and status 2 */
+ NPU_DEV_CFG_INIT(dev, base + PCICAP_EXP_LCTL2, 4, 0x00000003,
+ 0xffff0000, 0x00200000);
+
+ /* 0x34 - Slot capability 2 */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_SCAP2, 4, 0x00000000);
+
+ /* 0x38 - Slot control and status 2 */
+ NPU_DEV_CFG_INIT_RO(dev, base + PCICAP_EXP_SCTL2, 4, 0x00000000);
+}
+
+static struct npu_dev_cap *npu_dev_create_capability(struct npu_dev *dev,
+ void (*populate)(struct npu_dev_cap *),
+ uint16_t id,
+ uint16_t start,
+ uint16_t end)
+{
+ struct npu_dev_cap *cap;
+
+ /* Check if the capability already exists */
+ cap = npu_dev_find_capability(dev, id);
+ if (cap)
+ return cap;
+
+ /* Allocate new one */
+ cap = zalloc(sizeof(struct npu_dev_cap));
+ assert(cap);
+
+ /* Put it into the pool */
+ cap->id = id;
+ cap->start = start;
+ cap->end = end;
+ cap->dev = dev;
+ cap->populate = populate;
+ list_add_tail(&dev->capabilities, &cap->link);
+
+ return cap;
+}
+
+static struct npu_dev_cap *npu_dev_find_capability(struct npu_dev *dev,
+ uint16_t id)
+{
+ struct npu_dev_cap *cap;
+
+ list_for_each(&dev->capabilities, cap, link) {
+ if (cap->id == id)
+ return cap;
+ }
+
+ return NULL;
+}
+
+/*
+ * All capabilities should be put into the device capability
+ * list in ascending order of register offset for
+ * easy access at a later point.
+ */
+static void npu_dev_create_capabilities(struct npu_dev *dev)
+{
+ list_head_init(&dev->capabilities);
+
+ /* PCI express capability */
+ npu_dev_create_capability(dev, npu_dev_populate_pcie_cap,
+ PCI_CFG_CAP_ID_EXP, PCIE_CAP_START,
+ PCIE_CAP_END);
+
+ /* Vendor specific capability */
+ npu_dev_create_capability(dev, npu_dev_populate_vendor_cap,
+ PCI_CFG_CAP_ID_VENDOR, VENDOR_CAP_START,
+ VENDOR_CAP_END);
+}
+
+static void npu_dev_create_cfg(struct npu_dev *dev)
+{
+ struct npu_dev_cap *cap;
+ uint32_t offset;
+ uint32_t last_cap_offset;
+
+ /* Initialize config traps */
+ list_head_init(&dev->traps);
+
+ /* 0x00 - Vendor/Device ID */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_VENDOR_ID, 4, 0x04ea1014);
+
+ /* 0x04 - Command/Status
+ *
+ * Create one trap to trace toggling memory BAR enable bit
+ */
+ NPU_DEV_CFG_INIT(dev, PCI_CFG_CMD, 4, 0x00100000, 0xffb802b8,
+ 0xf9000000);
+
+ npu_dev_add_cfg_trap(dev, PCI_CFG_CMD, 1, NULL, NULL,
+ npu_dev_cfg_write_cmd);
+
+ /* 0x08 - Rev/Class/Cache */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_REV_ID, 4, 0x06800100);
+
+ /* 0x0c - CLS/Latency Timer/Header/BIST */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_CACHE_LINE_SIZE, 4, 0x00800000);
+
+ /* 0x10 - BARs, always 64-bits non-prefetchable
+ *
+ * Each emulated device represents one link and therefore
+ * there is one BAR for the associated DLTL region.
+ */
+
+ /* Low 32-bits */
+ NPU_DEV_CFG_INIT(dev, PCI_CFG_BAR0, 4,
+ (dev->bar.base & 0xfffffff0) | dev->bar.flags,
+ 0x0000000f, 0x00000000);
+
+ /* High 32-bits */
+ NPU_DEV_CFG_INIT(dev, PCI_CFG_BAR1, 4, (dev->bar.base >> 32),
+ 0x00000000, 0x00000000);
+
+ /*
+ * Create trap. Writing 0xFF's to BAR registers should be
+ * trapped and return the size on the next read.
+ */
+ npu_dev_add_cfg_trap(dev, PCI_CFG_BAR0, 8, &dev->bar,
+ npu_dev_cfg_read_bar, npu_dev_cfg_write_bar);
+
+ /* 0x18/1c/20/24 - Disabled BAR#2/3/4/5
+ *
+ * Mark those BARs readonly so that 0x0 will be returned when
+ * probing the length and the BARs will be skipped.
+ */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_BAR2, 4, 0x00000000);
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_BAR3, 4, 0x00000000);
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_BAR4, 4, 0x00000000);
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_BAR5, 4, 0x00000000);
+
+ /* 0x28 - Cardbus CIS pointer */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_CARDBUS_CIS, 4, 0x00000000);
+
+ /* 0x2c - Subsystem ID */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_SUBSYS_VENDOR_ID, 4, 0x00000000);
+
+ /* 0x30 - ROM BAR
+ *
+ * Force its size to be zero so that the kernel will skip
+ * probing the ROM BAR. We needn't emulate ROM BAR.
+ */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_ROMBAR, 4, 0xffffffff);
+
+ /* 0x34 - PCI Capability
+ *
+ * By default, we don't have any capabilities
+ */
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_CAP, 4, 0x00000000);
+
+ last_cap_offset = PCI_CFG_CAP - 1;
+ list_for_each(&dev->capabilities, cap, link) {
+ offset = cap->start;
+
+ /* Initialize config space for the capability */
+ if (cap->populate)
+ cap->populate(cap);
+
+ /* Add capability header */
+ NPU_DEV_CFG_INIT_RO(dev, offset, 2, cap->id);
+
+ /* Update the next capability pointer */
+ NPU_DEV_CFG_NORMAL_WR(dev, last_cap_offset + 1, 1, offset);
+
+ last_cap_offset = offset;
+ }
+
+ /* 0x38 - Reserved */
+ NPU_DEV_CFG_INIT_RO(dev, 0x38, 4, 0x00000000);
+
+ /* 0x3c - INT line/pin/Minimal grant/Maximal latency */
+ if (!(dev->index % 2))
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_INT_LINE, 4, 0x00000100);
+ else
+ NPU_DEV_CFG_INIT_RO(dev, PCI_CFG_INT_LINE, 4, 0x00000200);
+}
+
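+/* Pick a bdfn for a new emulated device. Links sharing a pbcq (and
+ * hence a GPU) are packed as functions of the same device number;
+ * links of a new GPU get function 0 of the next free device number. */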
+static uint32_t npu_allocate_bdfn(struct npu *p, uint32_t pbcq)
+{
+ int i;
+ int dev = -1;
+ int bdfn = -1;
+
+ /* Find the highest function number allocated to emulated PCI
+ * devices associated with this GPU. */
+ for(i = 0; i < p->total_devices; i++) {
+ int dev_bdfn = p->devices[i].bdfn;
+ dev = MAX(dev, dev_bdfn & 0xf8);
+
+ if (dt_prop_get_u32(p->devices[i].dt_node,
+ "ibm,npu-pbcq") == pbcq)
+ bdfn = MAX(bdfn, dev_bdfn);
+ }
+
+ if (bdfn >= 0)
+ /* Device has already been allocated for this GPU so
+ * assign the emulated PCI device the next
+ * function. */
+ return bdfn + 1;
+ else if (dev >= 0)
+ /* Otherwise allocate a new device and allocate
+ * function 0. */
+ return dev + (1 << 3);
+ else
+ return 0;
+}
+
+static void npu_create_devices(struct dt_node *dn, struct npu *p)
+{
+ struct npu_dev *dev;
+ struct dt_node *npu_dn, *link;
+ uint32_t npu_phandle, index = 0;
+ uint64_t buid;
+ uint64_t lsisrcid;
+
+ lsisrcid = GETFIELD(NPU_LSI_SRC_ID_BASE,
+ in_be64(p->at_regs + NPU_LSI_SOURCE_ID));
+ buid = SETFIELD(NP_BUID_BASE, 0ull,
+ (p8_chip_irq_block_base(p->chip_id, P8_IRQ_BLOCK_MISC) | lsisrcid));
+ buid |= NP_BUID_ENABLE;
+
+ /* Get the NPU node whose links we expand here into PCI-like
+ * devices attached to our emulated PHB. */
+ npu_phandle = dt_prop_get_u32(dn, "ibm,npcq");
+ npu_dn = dt_find_by_phandle(dt_root, npu_phandle);
+ assert(npu_dn);
+
+ /* Walk the link@x nodes to initialize devices */
+ p->total_devices = 0;
+ p->phb.scan_map = 0;
+ dt_for_each_compatible(npu_dn, link, "ibm,npu-link") {
+ struct npu_dev_bar *bar;
+ uint32_t pbcq;
+ uint64_t val;
+ uint32_t j;
+
+ dev = &p->devices[index];
+ dev->index = dt_prop_get_u32(link, "ibm,npu-link-index");
+ dev->xscom = npu_link_scom_base(npu_dn, p->xscom_base,
+ dev->index);
+
+ dev->npu = p;
+ dev->dt_node = link;
+
+ /* We don't support MMIO PHY access yet */
+ dev->pl_base = NULL;
+
+ pbcq = dt_prop_get_u32(link, "ibm,npu-pbcq");
+ dev->bdfn = npu_allocate_bdfn(p, pbcq);
+
+ /* This must be done after calling
+ * npu_allocate_bdfn() */
+ p->total_devices++;
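+ /* Mark the device number as present so the PCI core scans it */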
+ p->phb.scan_map |= 0x1 << ((dev->bdfn & 0xf8) >> 3);
+
+ dev->pl_xscom_base = dt_prop_get_u64(link, "ibm,npu-phy");
+ dev->lane_mask = dt_prop_get_u32(link, "ibm,npu-lane-mask");
+
+ /* Setup BUID/ISRN */
+ xscom_write(p->chip_id, dev->xscom + NX_NP_BUID, buid);
+
+ /* Setup emulated config space */
+ for (j = 0; j < NPU_DEV_CFG_MAX; j++)
+ dev->config[j] = zalloc(NPU_DEV_CFG_SIZE);
+ bar = &dev->bar;
+ bar->flags = (PCI_CFG_BAR_TYPE_MEM |
+ PCI_CFG_BAR_MEM64);
+
+ /* Update BAR info */
+ bar->xscom = dev->xscom + NX_MMIO_BAR_0;
+ xscom_read(p->chip_id, bar->xscom, &val);
+ bar->base = GETFIELD(NX_MMIO_BAR_BASE, val) << 12;
+ bar->size = get_bar_size(val);
+
+ /*
+ * The config space is initialised with the BAR
+ * disabled, so make sure it is actually disabled in
+ * hardware.
+ */
+ npu_dev_bar_update(p->chip_id, bar, false);
+
+ /* Initialize capabilities */
+ npu_dev_create_capabilities(dev);
+
+ /* Initialize config space */
+ npu_dev_create_cfg(dev);
+
+ index++;
+ }
+}
+
+static void npu_add_phb_properties(struct npu *p)
+{
+ struct dt_node *np = p->phb.dt_node;
+ uint32_t icsp = get_ics_phandle();
+ uint64_t tkill, mm_base, mm_size;
+ uint32_t base_lsi = p->base_lsi;
+ uint32_t map[] = { 0x0, 0x0, 0x0, 0x1, icsp, base_lsi,
+ 0x0, 0x0, 0x0, 0x2, icsp, base_lsi + 1,
+ 0x800, 0x0, 0x0, 0x1, icsp, base_lsi + 2,
+ 0x800, 0x0, 0x0, 0x2, icsp, base_lsi + 3 };
+ uint32_t mask[] = {0xf800, 0x0, 0x0, 0x7};
+
+ /* Add various properties that HB doesn't have to
+ * add, some of them simply because they result from
+ * policy decisions made in skiboot rather than in HB,
+ * such as the MMIO windows going to PCI, interrupts,
+ * etc.
+ */
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+ dt_add_property_cells(np, "bus-range", 0, 0xff);
+ dt_add_property_cells(np, "clock-frequency", 0x200, 0);
+ dt_add_property_cells(np, "interrupt-parent", icsp);
+
+ /* DLPL Interrupts */
+ p->phb.lstate.int_size = 1;
+ p->phb.lstate.int_val[0][0] = p->base_lsi + NPU_LSI_INT_DL0;
+ p->phb.lstate.int_val[1][0] = p->base_lsi + NPU_LSI_INT_DL1;
+ p->phb.lstate.int_val[2][0] = p->base_lsi + NPU_LSI_INT_DL2;
+ p->phb.lstate.int_val[3][0] = p->base_lsi + NPU_LSI_INT_DL3;
+ p->phb.lstate.int_parent[0] = icsp;
+ p->phb.lstate.int_parent[1] = icsp;
+ p->phb.lstate.int_parent[2] = icsp;
+ p->phb.lstate.int_parent[3] = icsp;
+
+ /* Due to the way the emulated PCI devices are structured in
+ * the device tree, the core PCI layer doesn't do this for
+ * us. Besides, the swizzling wouldn't suit our needs even if
+ * it did. */
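+ /* Each interrupt-map entry is { phys.hi 0 0 INT#, int-parent,
+ * LSI number }; the mask matches the device number bits of
+ * phys.hi and the INT# cell, so INTA/INTB of device 0 map to
+ * base_lsi + 0/1 and INTA/INTB of device 1 to base_lsi + 2/3.
+ */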
+ dt_add_property(np, "interrupt-map", map, sizeof(map));
+ dt_add_property(np, "interrupt-map-mask", mask, sizeof(mask));
+
+ /* NPU PHB properties */
+ /* TODO: Due to an erratum, TCE KILL only works when DMA traffic
+ * has been stopped. We need to implement the workaround, which
+ * is to do a TCE kill all instead. */
+ tkill = cleanup_addr((uint64_t)p->at_regs) + NPU_TCE_KILL;
+ dt_add_property_cells(np, "ibm,opal-num-pes",
+ NPU_NUM_OF_PES);
+ dt_add_property_cells(np, "ibm,opal-reserved-pe",
+ NPU_NUM_OF_PES);
+ dt_add_property_cells(np, "ibm,opal-tce-kill",
+ hi32(tkill), lo32(tkill));
+
+ /* The memory window is exposed as a 32-bit non-prefetchable
+ * one because a 64-bit prefetchable window needs special
+ * handling in the kernel.
+ */
+ mm_base = p->mm_base;
+ mm_size = p->mm_size;
+ dt_add_property_cells(np, "ranges", 0x02000000,
+ hi32(mm_base), lo32(mm_base),
+ hi32(mm_base), lo32(mm_base),
+ hi32(mm_size), lo32(mm_size));
+}
+
+static void npu_create_phb(struct dt_node *dn)
+{
+ const struct dt_property *prop;
+ struct npu *p;
+ uint32_t links;
+ void *pmem;
+
+ /* Retrieve number of devices */
+ links = dt_prop_get_u32(dn, "ibm,links");
+ pmem = zalloc(sizeof(struct npu) + links * sizeof(struct npu_dev));
+ assert(pmem);
+
+ /* Populate PHB */
+ p = pmem;
+ p->index = dt_prop_get_u32(dn, "ibm,phb-index");
+ p->chip_id = dt_prop_get_u32(dn, "ibm,chip-id");
+ p->xscom_base = dt_prop_get_u32(dn, "ibm,xscom-base");
+ p->total_devices = links;
+
+ /* TODO: When hardware fences are implemented, detect them here */
+ p->fenced = false;
+
+ /* This is the AT base */
+ p->at_xscom = p->xscom_base + NPU_AT_SCOM_OFFSET;
+ p->at_regs = (void *)dt_get_address(dn, 0, NULL);
+
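+ /* ibm,mmio-window holds a (base, size) pair of 64-bit values */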
+ prop = dt_require_property(dn, "ibm,mmio-window", -1);
+ assert(prop->len >= (2 * sizeof(uint64_t)));
+ p->mm_base = ((const uint64_t *)prop->prop)[0];
+ p->mm_size = ((const uint64_t *)prop->prop)[1];
+
+ p->devices = pmem + sizeof(struct npu);
+
+ /* Interrupt */
+ p->base_lsi = p8_chip_irq_block_base(p->chip_id, P8_IRQ_BLOCK_MISC) +
+ NPU_LSI_IRQ_MIN;
+
+ /* Generic PHB */
+ p->phb.dt_node = dn;
+ p->phb.ops = &npu_ops;
+ p->phb.phb_type = phb_type_pcie_v3;
+
+ /* Populate devices */
+ npu_create_devices(dn, p);
+
+ /* Populate extra properties */
+ npu_add_phb_properties(p);
+
+ /* Register PHB */
+ pci_register_phb(&p->phb, OPAL_DYNAMIC_PHB_ID);
+
+ /* Initialize IODA cache */
+ npu_ioda_init(p);
+
+ /* Register interrupt source */
+ npu_register_irq(p);
+
+ /* Initialize hardware */
+ npu_hw_init(p);
+}
+
+void probe_npu(void)
+{
+ struct dt_node *np;
+
+ /* Scan NPU XSCOM nodes */
+ dt_for_each_compatible(dt_root, np, "ibm,power8-npu")
+ npu_probe_phb(np);
+
+ /* Scan newly created PHB nodes */
+ dt_for_each_compatible(dt_root, np, "ibm,power8-npu-pciex")
+ npu_create_phb(np);
+}
diff --git a/hw/nx-rng.c b/hw/nx-rng.c
index 063848d..f652bb5 100644
--- a/hw/nx-rng.c
+++ b/hw/nx-rng.c
@@ -70,7 +70,7 @@ void nx_create_rng_node(struct dt_node *node)
}
rng_len = (u64[]){ 0x1000, /* 4K */
0x10000, /* 64K */
- 0x400000000, /* 16G*/
+ 0x400000000UL, /* 16G*/
0x100000, /* 1M */
0x1000000 /* 16M */} [len];
diff --git a/hw/occ.c b/hw/occ.c
index 79140cc..0e3d953 100644
--- a/hw/occ.c
+++ b/hw/occ.c
@@ -99,6 +99,13 @@ static bool wait_for_all_occ_init(void)
chip->id);
return false;
}
+
+ if (!chip->occ_functional) {
+ prlog(PR_WARNING, "OCC: Chip: %x occ not functional\n",
+ chip->id);
+ continue;
+ }
+
/* Get PState table address */
occ_data_area = chip->homer_base + P8_HOMER_SAPPHIRE_DATA_OFFSET;
occ_data = (struct occ_pstate_table *)occ_data_area;
@@ -269,6 +276,11 @@ static bool cpu_pstates_prepare_core(struct proc_chip *chip, struct cpu_thread *
/* Set new pstate to core */
rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMCR), &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: Failed to read from OCC in pstates init\n");
+ return false;
+ }
tmp = tmp & ~0xFFFF000000000000ULL;
pstate = ((uint64_t) pstate_nom) & 0xFF;
tmp = tmp | (pstate << 56) | (pstate << 48);
@@ -300,6 +312,12 @@ static bool cpu_pstates_prepare_core(struct proc_chip *chip, struct cpu_thread *
/* Just debug */
rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMSR), &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: Failed to read back setting from OCC"
+ "in pstates init\n");
+ return false;
+ }
prlog(PR_DEBUG, "OCC: Chip %x Core %x PPMSR %016llx\n",
chip->id, core, tmp);
@@ -344,7 +362,7 @@ static void occ_throttle_poll(void *data __unused)
* Queue OCC_THROTTLE with throttle status as 0 to
* indicate all OCCs are active after a reset.
*/
- occ_msg.type = OCC_THROTTLE;
+ occ_msg.type = cpu_to_be64(OCC_THROTTLE);
occ_msg.chip = 0;
occ_msg.throttle_status = 0;
rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, 3,
@@ -360,9 +378,9 @@ static void occ_throttle_poll(void *data __unused)
if ((occ_data->valid == 1) &&
(chip->throttle != occ_data->throttle) &&
(occ_data->throttle <= OCC_MAX_THROTTLE_STATUS)) {
- occ_msg.type = OCC_THROTTLE;
- occ_msg.chip = chip->id;
- occ_msg.throttle_status = occ_data->throttle;
+ occ_msg.type = cpu_to_be64(OCC_THROTTLE);
+ occ_msg.chip = cpu_to_be64(chip->id);
+ occ_msg.throttle_status = cpu_to_be64(occ_data->throttle);
rc = _opal_queue_msg(OPAL_MSG_OCC, NULL,
occ_msg_consumed,
3, (uint64_t *)&occ_msg);
@@ -436,6 +454,50 @@ struct occ_load_req {
};
static LIST_HEAD(occ_load_req_list);
+int find_master_and_slave_occ(uint64_t **master, uint64_t **slave,
+ int *nr_masters, int *nr_slaves)
+{
+ struct proc_chip *chip;
+ int nr_chips = 0, i;
+ uint64_t chipids[MAX_CHIPS];
+
+ for_each_chip(chip) {
+ chipids[nr_chips++] = chip->id;
+ }
+
+ chip = next_chip(NULL);
+ /*
+ * Proc0 is the master OCC for Tuleta/Alpine boxes.
+ * Hostboot expects the pair of chips for MURANO, so pass the sibling
+ * chip id along with proc0 to hostboot.
+ */
+ *nr_masters = (chip->type == PROC_CHIP_P8_MURANO) ? 2 : 1;
+ *master = (uint64_t *)malloc(*nr_masters * sizeof(uint64_t));
+
+ if (!*master) {
+ printf("OCC: master array alloc failure\n");
+ return -ENOMEM;
+ }
+
+ if (nr_chips - *nr_masters > 0) {
+ *nr_slaves = nr_chips - *nr_masters;
+ *slave = (uint64_t *)malloc(*nr_slaves * sizeof(uint64_t));
+ if (!*slave) {
+ printf("OCC: slave array alloc failure\n");
+ return -ENOMEM;
+ }
+ }
+
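+ /* The first *nr_masters chip ids become masters, the rest slaves */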
+ for (i = 0; i < nr_chips; i++) {
+ if (i < *nr_masters) {
+ *(*master + i) = chipids[i];
+ continue;
+ }
+ *(*slave + i - *nr_masters) = chipids[i];
+ }
+ return 0;
+}
+
static void occ_queue_load(u8 scope, u32 dbob_id, u32 seq_id)
{
struct occ_load_req *occ_req;
@@ -467,7 +529,7 @@ static void __occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id)
prlog(PR_INFO, "OCC: Load: Fallback to preloaded image\n");
rc = 0;
} else if (!rc) {
- struct opal_occ_msg occ_msg = { OCC_LOAD, 0, 0 };
+ struct opal_occ_msg occ_msg = { CPU_TO_BE64(OCC_LOAD), 0, 0 };
rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, 3,
(uint64_t *)&occ_msg);
diff --git a/hw/p5ioc2-phb.c b/hw/p5ioc2-phb.c
deleted file mode 100644
index 06c2cc2..0000000
--- a/hw/p5ioc2-phb.c
+++ /dev/null
@@ -1,1206 +0,0 @@
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <skiboot.h>
-#include <p5ioc2.h>
-#include <p5ioc2-regs.h>
-#include <io.h>
-#include <timebase.h>
-#include <affinity.h>
-#include <pci.h>
-#include <pci-cfg.h>
-#include <interrupts.h>
-#include <ccan/str/str.h>
-
-#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB%d: " fmt, \
- (p)->phb.opal_id, ## a)
-#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB%d: " fmt, \
- (p)->phb.opal_id, ## a)
-
-/*
- * Lock callbacks. Allows the OPAL API handlers to lock the
- * PHB around calls such as config space, EEH, etc...
- */
-static void p5ioc2_phb_lock(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- lock(&p->lock);
-}
-
-static void p5ioc2_phb_unlock(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- unlock(&p->lock);
-}
-
-/*
- * Configuration space access
- *
- * The PHB lock is assumed to be already held
- */
-static int64_t p5ioc2_pcicfg_address(struct p5ioc2_phb *p, uint32_t bdfn,
- uint32_t offset, uint32_t size)
-{
- uint32_t addr, sm = size - 1;
-
- if (bdfn > 0xffff)
- return OPAL_PARAMETER;
- /* XXX Should we enable 4K config space on PCI-X 2.0 ? */
- if ((offset > 0xff && !p->is_pcie) || offset > 0xfff)
- return OPAL_PARAMETER;
- if (offset & sm)
- return OPAL_PARAMETER;
-
- /* The root bus only has a device at 0 and we get into an
- * error state if we try to probe beyond that, so let's
- * avoid that and just return an error to Linux
- */
- if (p->is_pcie && (bdfn >> 8) == 0 && (bdfn & 0xff))
- return OPAL_HARDWARE;
-
- /* Prevent special operation generation */
- if (((bdfn >> 3) & 0x1f) == 0x1f)
- return OPAL_HARDWARE;
-
- /* Check PHB state */
- if (p->state == P5IOC2_PHB_STATE_BROKEN)
- return OPAL_HARDWARE;
-
- /* Additionally, should we prevent writes to the PHB own
- * bus number register ?
- */
-
- addr = CAP_PCADR_ENABLE;
- addr = SETFIELD(CAP_PCADR_BDFN, addr, bdfn);
- addr = SETFIELD(CAP_PCADR_EXTOFF, addr, offset >> 8);
- addr |= (offset & 0xff);
- out_le32(p->regs + CAP_PCADR, addr);
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_read8(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint8_t *data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- /* Initialize data in case of error */
- *data = 0xff;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 1);
- if (rc)
- return rc;
-
- *data = in_8(p->regs + CAP_PCDAT + (offset & 3));
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_read16(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint16_t *data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- /* Initialize data in case of error */
- *data = 0xffff;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 2);
- if (rc)
- return rc;
-
- *data = in_le16(p->regs + CAP_PCDAT + (offset & 3));
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_read32(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint32_t *data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- /* Initialize data in case of error */
- *data = 0xffffffff;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 4);
- if (rc)
- return rc;
-
- *data = in_le32(p->regs + CAP_PCDAT);
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_write8(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint8_t data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 1);
- if (rc)
- return rc;
-
- out_8(p->regs + CAP_PCDAT + (offset & 3), data);
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_write16(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint16_t data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 2);
- if (rc)
- return rc;
-
- out_le16(p->regs + CAP_PCDAT + (offset & 3), data);
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_pcicfg_write32(struct phb *phb, uint32_t bdfn,
- uint32_t offset, uint32_t data)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- int64_t rc;
-
- rc = p5ioc2_pcicfg_address(p, bdfn, offset, 4);
- if (rc)
- return rc;
-
- out_le32(p->regs + CAP_PCDAT, data);
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_presence_detect(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint16_t slotstat;
- int64_t rc;
-
- if (!p->is_pcie) {
- uint32_t lsr;
-
- lsr = in_be32(p->regs + SHPC_LOGICAL_SLOT);
- if (GETFIELD(SHPC_LOGICAL_SLOT_PRSNT, lsr)
- != SHPC_SLOT_STATE_EMPTY)
- return OPAL_SHPC_DEV_PRESENT;
- else
- return OPAL_SHPC_DEV_NOT_PRESENT;
- }
-
- rc = p5ioc2_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTSTAT,
- &slotstat);
- if (rc || !(slotstat & PCICAP_EXP_SLOTSTAT_PDETECTST))
- return OPAL_SHPC_DEV_NOT_PRESENT;
- return OPAL_SHPC_DEV_PRESENT;
-}
-
-static int64_t p5ioc2_link_state(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint16_t lstat;
- int64_t rc;
-
- /* XXX Test for PHB in error state ? */
- if (!p->is_pcie)
- return OPAL_SHPC_LINK_UP_x1;
-
- rc = p5ioc2_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_LSTAT,
- &lstat);
- if (rc < 0) {
- /* Shouldn't happen */
- PHBERR(p, "Failed to read link status\n");
- return OPAL_HARDWARE;
- }
- if (!(lstat & PCICAP_EXP_LSTAT_DLLL_ACT))
- return OPAL_SHPC_LINK_DOWN;
- return GETFIELD(PCICAP_EXP_LSTAT_WIDTH, lstat);
-}
-
-static int64_t p5ioc2_power_state(struct phb *phb __unused)
-{
- /* XXX FIXME */
-#if 0
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint64_t reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
-
- /* XXX Test for PHB in error state ? */
-
- if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
- return OPAL_SHPC_POWER_ON;
-
- return OPAL_SHPC_POWER_OFF;
-#else
- return OPAL_SHPC_POWER_ON;
-#endif
-}
-
-/* p5ioc2_sm_slot_power_off - Slot power off state machine
- */
-static int64_t p5ioc2_sm_slot_power_off(struct p5ioc2_phb *p)
-{
- switch(p->state) {
- default:
- break;
- }
-
- /* Unknown state, hardware error ? */
- return OPAL_HARDWARE;
-}
-
-static int64_t p5ioc2_slot_power_off(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
- return OPAL_BUSY;
-
- /* run state machine */
- return p5ioc2_sm_slot_power_off(p);
-}
-
-static int64_t p5ioc2_sm_slot_power_on(struct p5ioc2_phb *p __unused)
-{
-#if 0
- uint64_t reg;
- uint32_t reg32;
- uint16_t brctl;
-
- switch(p->state) {
- case P5IOC2_PHB_STATE_FUNCTIONAL:
- /* Check presence */
- reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
- if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
- PHBDBG(p, "Slot power on: no device\n");
- return OPAL_CLOSED;
- }
-
- /* Adjust UTL interrupt settings to disable various
- * errors that would interfere with the process
- */
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
-
- /* If the power is not on, turn it on now */
- if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
- reg = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
- reg &= ~(0x8c00000000000000ul);
- reg |= 0x8400000000000000ul;
- out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
- p->state = PHB_STATE_SPUP_STABILIZE_DELAY;
- PHBDBG(p, "Slot power on: powering on...\n");
- return p5ioc2_set_sm_timeout(p, secs_to_tb(2));
- }
- /* Power is already on */
- power_ok:
- /* Ensure hot reset is deasserted */
- p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
- brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
- p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
- p->retries = 40;
- p->state = PHB_STATE_SPUP_WAIT_LINK;
- PHBDBG(p, "Slot power on: waiting for link\n");
- /* Fall through */
- case PHB_STATE_SPUP_WAIT_LINK:
- reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
- /* Link is up ? Complete */
-
- /* XXX TODO: Check link width problem and if present
- * go straight to the host reset code path.
- */
- if (reg & PHB_PCIE_DLP_TC_DL_LINKACT) {
- /* Restore UTL interrupts */
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,
- 0xfe65000000000000);
- p->state = PHB_STATE_FUNCTIONAL;
- PHBDBG(p, "Slot power on: up !\n");
- return OPAL_SUCCESS;
- }
- /* Retries */
- p->retries--;
- if (p->retries == 0) {
- /* XXX Improve logging */
- PHBERR(p,"Slot power on: Timeout waiting for link\n");
- goto error;
- }
- /* Check time elapsed */
- if ((p->retries % 20) != 0)
- return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
-
- /* >200ms, time to try a hot reset after clearing the
- * link status bit (doco says to do so)
- */
- out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x0080000000000000);
-
- /* Mask receiver error status in AER */
- p5ioc2_pcicfg_read32(&p->phb, 0,
- p->aercap + PCIECAP_AER_CE_MASK, &reg32);
- reg32 |= PCIECAP_AER_CE_RECVR_ERR;
- p5ioc2_pcicfg_write32(&p->phb, 0,
- p->aercap + PCIECAP_AER_CE_MASK, reg32);
-
- /* Turn on host reset */
- p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
- brctl |= PCI_CFG_BRCTL_SECONDARY_RESET;
- p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
- p->state = PHB_STATE_SPUP_HOT_RESET_DELAY;
- PHBDBG(p, "Slot power on: soft reset...\n");
- return p5ioc2_set_sm_timeout(p, secs_to_tb(1));
- case PHB_STATE_SPUP_HOT_RESET_DELAY:
- /* Turn off host reset */
- p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
- brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
- p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
- /* Clear spurious errors */
- out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00e0000000000000);
- p5ioc2_pcicfg_write32(&p->phb, 0,
- p->aercap + PCIECAP_AER_CE_STATUS,
- PCIECAP_AER_CE_RECVR_ERR);
- /* Unmask receiver error status in AER */
- p5ioc2_pcicfg_read32(&p->phb, 0,
- p->aercap + PCIECAP_AER_CE_MASK, &reg32);
- reg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
- p5ioc2_pcicfg_write32(&p->phb, 0,
- p->aercap + PCIECAP_AER_CE_MASK, reg32);
- /* Go back to waiting for link */
- p->state = PHB_STATE_SPUP_WAIT_LINK;
- PHBDBG(p, "Slot power on: waiting for link (2)\n");
- return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
-
- case PHB_STATE_SPUP_STABILIZE_DELAY:
- /* Come here after the 2s delay after power up */
- p->retries = 1000;
- p->state = PHB_STATE_SPUP_SLOT_STATUS;
- PHBDBG(p, "Slot power on: waiting for power\n");
- /* Fall through */
- case PHB_STATE_SPUP_SLOT_STATUS:
- reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
-
- /* Doc says to check LED status, but we ignore that, there
- * no point really and it's easier that way
- */
- if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
- goto power_ok;
- if (p->retries-- == 0) {
- /* XXX Improve error logging */
- PHBERR(p, "Timeout powering up slot\n");
- goto error;
- }
- return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
- default:
- break;
- }
-
- /* Unknown state, hardware error ? */
- error:
- p->state = PHB_STATE_FUNCTIONAL;
- return OPAL_HARDWARE;
-#else
- return OPAL_SUCCESS;
-#endif
-}
-
-static int64_t p5ioc2_slot_power_on(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
- return OPAL_BUSY;
-
- /* run state machine */
- return p5ioc2_sm_slot_power_on(p);
-}
-
-static int64_t p5ioc2_sm_hot_reset(struct p5ioc2_phb *p)
-{
- switch(p->state) {
- default:
- break;
- }
-
- /* Unknown state, hardware error ? */
- return OPAL_HARDWARE;
-}
-
-static int64_t p5ioc2_hot_reset(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
- return OPAL_BUSY;
-
- /* run state machine */
- return p5ioc2_sm_hot_reset(p);
-}
-
-static int64_t p5ioc2_sm_freset(struct p5ioc2_phb *p)
-{
- switch(p->state) {
- default:
- break;
- }
-
- /* XXX Not implemented, return success to make
- * pci.c happy, otherwise probing of slots will
- * fail
- */
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_freset(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
-
- if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
- return OPAL_BUSY;
-
- /* run state machine */
- return p5ioc2_sm_freset(p);
-}
-
-static int64_t p5ioc2_poll(struct phb *phb)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint64_t now = mftb();
-
- if (p->state == P5IOC2_PHB_STATE_FUNCTIONAL)
- return OPAL_SUCCESS;
-
- /* Check timer */
- if (p->delay_tgt_tb &&
- tb_compare(now, p->delay_tgt_tb) == TB_ABEFOREB)
- return p->delay_tgt_tb - now;
-
- /* Expired (or not armed), clear it */
- p->delay_tgt_tb = 0;
-
-#if 0
- /* Dispatch to the right state machine */
- switch(p->state) {
- case PHB_STATE_SPUP_STABILIZE_DELAY:
- case PHB_STATE_SPUP_SLOT_STATUS:
- case PHB_STATE_SPUP_WAIT_LINK:
- case PHB_STATE_SPUP_HOT_RESET_DELAY:
- return p5ioc2_sm_slot_power_on(p);
- case PHB_STATE_SPDOWN_STABILIZE_DELAY:
- case PHB_STATE_SPDOWN_SLOT_STATUS:
- return p5ioc2_sm_slot_power_off(p);
- case PHB_STATE_HRESET_DELAY:
- return p5ioc2_sm_hot_reset(p);
- default:
- break;
- }
-#endif
- /* Unknown state, could be a HW error */
- return OPAL_HARDWARE;
-}
-
-static int64_t p5ioc2_eeh_freeze_status(struct phb *phb, uint64_t pe_number,
- uint8_t *freeze_state,
- uint16_t *pci_error_type,
- uint16_t *severity,
- uint64_t *phb_status __unused)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint32_t cfgrw;
-
- /* Defaults: not frozen */
- *freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
- *pci_error_type = OPAL_EEH_NO_ERROR;
- if (severity)
- *severity = OPAL_EEH_SEV_NO_ERROR;
-
- if (pe_number != 0)
- return OPAL_PARAMETER;
-
- /* XXX Handle PHB status */
- /* XXX We currently only check for PE freeze, not fence */
-
- cfgrw = in_be32(p->regs + CAP_PCFGRW);
- if (cfgrw & CAP_PCFGRW_MMIO_FROZEN)
- *freeze_state |= OPAL_EEH_STOPPED_MMIO_FREEZE;
- if (cfgrw & CAP_PCFGRW_DMA_FROZEN)
- *freeze_state |= OPAL_EEH_STOPPED_DMA_FREEZE;
-
- if (severity &&
- (cfgrw & (CAP_PCFGRW_MMIO_FROZEN | CAP_PCFGRW_MMIO_FROZEN)))
- *severity = OPAL_EEH_SEV_PE_ER;
-
- /* XXX Don't bother populating pci_error_type */
- /* Should read the bits from PLSSR */
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_eeh_next_error(struct phb *phb, uint64_t *first_frozen_pe,
- uint16_t *pci_error_type, uint16_t *severity)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint32_t cfgrw;
-
- /* XXX Don't bother */
- *pci_error_type = OPAL_EEH_NO_ERROR;
- *first_frozen_pe = 0;
-
- cfgrw = in_be32(p->regs + CAP_PCFGRW);
- if (cfgrw & (CAP_PCFGRW_MMIO_FROZEN | CAP_PCFGRW_MMIO_FROZEN))
- *severity = OPAL_EEH_SEV_PE_ER;
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_eeh_freeze_clear(struct phb *phb, uint64_t pe_number,
- uint64_t eeh_action_token)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint32_t cfgrw;
-
- if (pe_number != 0)
- return OPAL_PARAMETER;
-
- /*
- * This sequence isn't very well documented. We play guess
- * games based on the documentation, what we do on P7IOC,
- * and common sense.
- *
- * Basically we start from the low level (UTL), clear all
- * error conditions there. Then we clear error conditions
- * in the PLSSR and DMACSR.
- *
- * Once that's done, we unfreeze the PHB
- *
- * Note: Should we also clear the error bits in the config
- * space ? The docs don't say anything... TODO: Check what
- * OPAL does if possible or ask Milton.
- */
-
- /* Clear UTL error regs on PCIe */
- if (p->is_pcie) {
- uint32_t err;
-
- err = in_be32(p->regs + UTL_SYS_BUS_AGENT_STATUS);
- out_be32(p->regs + UTL_SYS_BUS_AGENT_STATUS, err);
- err = in_be32(p->regs + UTL_PCIE_PORT_STATUS);
- out_be32(p->regs + UTL_PCIE_PORT_STATUS, err);
- err = in_be32(p->regs + UTL_RC_STATUS);
- out_be32(p->regs + UTL_RC_STATUS, err);
- }
-
- /* XXX We should probably clear the error regs in the cfg space... */
-
- /* Clear PLSSR and DMACSR */
- out_be32(p->regs + CAP_DMACSR, 0);
- out_be32(p->regs + CAP_PLSSR, 0);
-
- /* Clear freeze state as requested */
- cfgrw = in_be32(p->regs + CAP_PCFGRW);
- if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO) {
- cfgrw &= ~CAP_PCFGRW_MMIO_FROZEN;
- out_be32(p->regs + CAP_PCFGRW, cfgrw);
- }
- if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_DMA) {
- cfgrw &= ~CAP_PCFGRW_DMA_FROZEN;
- out_be32(p->regs + CAP_PCFGRW, cfgrw);
- }
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_get_msi_64(struct phb *phb __unused, uint32_t mve_number,
- uint32_t xive_num, uint8_t msi_range,
- uint64_t *msi_address, uint32_t *message_data)
-{
- if (mve_number > 255 || xive_num > 255 || msi_range != 1)
- return OPAL_PARAMETER;
-
- *msi_address = 0x1000000000000000ul;
- *message_data = xive_num;
-
- return OPAL_SUCCESS;
-}
-
-static uint8_t p5ioc2_choose_bus(struct phb *phb __unused,
- struct pci_device *bridge __unused,
- uint8_t candidate, uint8_t *max_bus __unused,
- bool *use_max)
-{
- /* Use standard bus number selection */
- *use_max = false;
- return candidate;
-}
-
-/* p5ioc2_phb_ioda_reset - Reset the IODA tables
- *
- * This reset the IODA tables in the PHB. It is called at
- * initialization time, on PHB reset, and can be called
- * explicitly from OPAL
- *
- * Note: We don't handle EEH on p5ioc2, we use no cache
- * and thus always purge
- */
-static int64_t p5ioc2_ioda_reset(struct phb *phb, bool purge __unused)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- unsigned int i;
-
- /* Init XIVRs */
- for (i = 0; i < 16; i++) {
- p->xive_cache[i] = SETFIELD(CAP_XIVR_PRIO, 0, 0xff);
- out_be32(p->regs + CAP_XIVRn(i), 0x000000ff);
- }
-
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_set_phb_tce_memory(struct phb *phb,
- uint64_t tce_mem_addr,
- uint64_t tce_mem_size)
-{
- struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
- uint64_t tar;
- uint32_t cfg;
-
- printf("PHB%d: set_tce_memory: 0x%016llx 0x%016llx\n",
- p->index, tce_mem_addr, tce_mem_size);
- printf("PHB%d: bridge values : 0x%016llx 0x%016llx\n",
- p->index, p->ioc->tce_base, p->ioc->tce_size);
-
- /* First check if it fits in the memory established for
- * the IO HUB
- */
- if (tce_mem_addr &&
- (tce_mem_addr < p->ioc->tce_base ||
- tce_mem_addr > (p->ioc->tce_base + p->ioc->tce_size) ||
- (tce_mem_addr + tce_mem_size) >
- (p->ioc->tce_base + p->ioc->tce_size))) {
- prerror("PHB%d: TCEs not in bridge range\n", p->index);
- return OPAL_PARAMETER;
- }
-
- /* Supported sizes are power of two's naturally aligned
- * and between 64K and 8M (p5ioc2 spec)
- */
- if (tce_mem_addr && !is_pow2(tce_mem_size)) {
- prerror("PHB%d: Size is not a power of 2\n", p->index);
- return OPAL_PARAMETER;
- }
- if (tce_mem_addr & (tce_mem_size - 1)) {
- prerror("PHB%d: Not naturally aligned\n", p->index);
- return OPAL_PARAMETER;
- }
- if (tce_mem_addr &&
- (tce_mem_size < 0x10000 || tce_mem_size > 0x800000)) {
- prerror("PHB%d: Size out of range\n", p->index);
- return OPAL_PARAMETER;
- }
-
- /* First we disable TCEs in the bridge */
- cfg = in_be32(p->regs + CAP_PCFGRW);
- cfg &= ~CAP_PCFGRW_TCE_EN;
- out_be32(p->regs + CAP_PCFGRW, cfg);
-
-
- /* Now there's a blurb in the spec about all TARm needing
- * to have the same size.. I will let that as a surprise
- * for the user ... Linux does it fine and I'd rather not
- * keep more state to check than I need to
- */
- tar = 0;
- if (tce_mem_addr) {
- tar = SETFIELD(CA_TAR_HUBID, 0ul, p->ca ? 4 : 1);
- tar = SETFIELD(CA_TAR_ALTHUBID, tar, p->ca ? 4 : 1);
- tar = SETFIELD(CA_TAR_NUM_TCE, tar, ilog2(tce_mem_size) - 16);
- tar |= tce_mem_addr; /* addr is naturally aligned */
- tar |= CA_TAR_VALID;
- printf("PHB%d: Writing TAR: 0x%016llx\n", p->index, tar);
- }
- out_be64(p->ca_regs + CA_TARn(p->index), tar);
-
- /* Now set the TCE enable if we set a valid address */
- if (tce_mem_addr) {
- cfg |= CAP_PCFGRW_TCE_EN;
- out_be32(p->regs + CAP_PCFGRW, cfg);
- }
-
- return OPAL_SUCCESS;
-}
-
-
-static const struct phb_ops p5ioc2_phb_ops = {
- .lock = p5ioc2_phb_lock,
- .unlock = p5ioc2_phb_unlock,
- .cfg_read8 = p5ioc2_pcicfg_read8,
- .cfg_read16 = p5ioc2_pcicfg_read16,
- .cfg_read32 = p5ioc2_pcicfg_read32,
- .cfg_write8 = p5ioc2_pcicfg_write8,
- .cfg_write16 = p5ioc2_pcicfg_write16,
- .cfg_write32 = p5ioc2_pcicfg_write32,
- .choose_bus = p5ioc2_choose_bus,
- .eeh_freeze_status = p5ioc2_eeh_freeze_status,
- .eeh_freeze_clear = p5ioc2_eeh_freeze_clear,
- .next_error = p5ioc2_eeh_next_error,
- .get_msi_64 = p5ioc2_get_msi_64,
- .ioda_reset = p5ioc2_ioda_reset,
- .set_phb_tce_memory = p5ioc2_set_phb_tce_memory,
- .presence_detect = p5ioc2_presence_detect,
- .link_state = p5ioc2_link_state,
- .power_state = p5ioc2_power_state,
- .slot_power_off = p5ioc2_slot_power_off,
- .slot_power_on = p5ioc2_slot_power_on,
- .hot_reset = p5ioc2_hot_reset,
- .fundamental_reset = p5ioc2_freset,
- .poll = p5ioc2_poll,
-};
-
-/* p5ioc2_phb_get_xive - Interrupt control from OPAL */
-static int64_t p5ioc2_phb_get_xive(void *data, uint32_t isn,
- uint16_t *server, uint8_t *prio)
-{
- struct p5ioc2_phb *p = data;
- uint32_t irq, xivr, fbuid = P7_IRQ_FBUID(isn);
-
- if (fbuid != p->buid)
- return OPAL_PARAMETER;
- irq = isn & 0xf;
-
- xivr = p->xive_cache[irq];
- *server = GETFIELD(CAP_XIVR_SERVER, xivr);
- *prio = GETFIELD(CAP_XIVR_PRIO, xivr);
-
- return OPAL_SUCCESS;
-}
-
-/* p5ioc2_phb_set_xive - Interrupt control from OPAL */
-static int64_t p5ioc2_phb_set_xive(void *data, uint32_t isn,
- uint16_t server, uint8_t prio)
-{
- struct p5ioc2_phb *p = data;
- uint32_t irq, xivr, fbuid = P7_IRQ_FBUID(isn);
-
- if (fbuid != p->buid)
- return OPAL_PARAMETER;
- irq = isn & 0xf;
-
- printf("PHB%d: Set XIVE isn %04x (irq=%d) server=%x, prio=%x\n",
- p->index, isn, irq, server, prio);
-
- xivr = SETFIELD(CAP_XIVR_SERVER, 0, server);
- xivr = SETFIELD(CAP_XIVR_PRIO, xivr, prio);
- p->xive_cache[irq] = xivr;
-
- /* Now we mangle the server and priority */
- if (prio == 0xff) {
- server = 0;
- prio = 0xff;
- } else {
- prio = (prio >> 3) | ((server & 7) << 5);
- server = server >> 3;
- }
-
- /* We use HRT entry 0 always for now */
- xivr = SETFIELD(CAP_XIVR_SERVER, 0, server);
- xivr = SETFIELD(CAP_XIVR_PRIO, xivr, prio);
- out_be32(p->regs + CAP_XIVRn(irq), xivr);
- printf("PHB%d: wrote 0x%08x to XIVR %d\n", p->index, xivr, irq);
-
- return OPAL_SUCCESS;
-}
-
-/* IRQ ops for OS interrupts (not internal) */
-static const struct irq_source_ops p5ioc2_phb_os_irq_ops = {
- .get_xive = p5ioc2_phb_get_xive,
- .set_xive = p5ioc2_phb_set_xive,
-};
-
-
-static void p5ioc2_phb_init_utl(struct p5ioc2_phb *p __unused)
-{
- /* XXX FIXME */
-}
-
-static void p5ioc2_phb_init_pcie(struct p5ioc2_phb *p)
-{
- int64_t ecap, aercap;
-
- ecap = pci_find_cap(&p->phb, 0, PCI_CFG_CAP_ID_EXP);
- if (ecap < 0) {
- /* Shouldn't happen */
- prerror("P5IOC2: Failed to locate PCI-E cap in bridge\n");
- return;
- }
- p->ecap = ecap;
-
- aercap = pci_find_ecap(&p->phb, 0, PCIECAP_ID_AER, NULL);
- if (aercap < 0) {
- /* Shouldn't happen */
- prerror("P5IOC2: Failed to locate AER ext cap in bridge\n");
- return;
- }
- p->aercap = aercap;
-
- /* XXX plenty more to do ... */
-}
-
-static void p5ioc2_phb_hwinit(struct p5ioc2_phb *p)
-{
- uint16_t pcicmd;
- uint32_t phbid;
-
- printf("P5IOC2: Initializing PHB HW...\n");
-
- /* Enable PHB and and disable address decoding */
- phbid = in_be32(p->ca_regs + CA_PHBIDn(p->index));
- phbid |= CA_PHBID_PHB_ENABLE;
- phbid &= ~CA_PHBID_ADDRSPACE_ENABLE;
- out_be32(p->ca_regs + CA_PHBIDn(p->index), phbid);
-
- /* Set BUID */
- out_be32(p->regs + CAP_BUID, SETFIELD(CAP_BUID_MASK, 0,
- P7_BUID_BASE(p->buid)));
- out_be32(p->regs + CAP_MSIBASE, P7_BUID_BASE(p->buid) << 16);
-
- /* Set IO and Memory mapping */
- out_be32(p->regs + CAP_IOAD_H, hi32(p->io_base + IO_PCI_START));
- out_be32(p->regs + CAP_IOAD_L, lo32(p->io_base + IO_PCI_START));
- out_be32(p->regs + CAP_IOSZ, ~(IO_PCI_SIZE - 1));
- out_be32(p->regs + CAP_IO_ST, IO_PCI_START);
- out_be32(p->regs + CAP_MEM1_H, hi32(p->mm_base + MM_PCI_START));
- out_be32(p->regs + CAP_MEM1_L, lo32(p->mm_base + MM_PCI_START));
- out_be32(p->regs + CAP_MSZ1, ~(MM_PCI_SIZE - 1));
- out_be32(p->regs + CAP_MEM_ST, MM_PCI_START);
-
- /* Setup the MODE registers. We captures the values used
- * by pHyp/OPAL
- */
- out_be32(p->regs + CAP_MODE0, 0x00800010);
- out_be32(p->regs + CAP_MODE1, 0x00800000);
- out_be32(p->regs + CAP_MODE3, 0xFFC00050);
- if (p->is_pcie)
- out_be32(p->regs + CAP_MODE2, 0x00000400);
- else
- out_be32(p->regs + CAP_MODE2, 0x00000408);
-
- /* XXX Setup of the arbiter... not sure what to do here,
- * probably system specific (depends on whow things are
- * wired on the motherboard). I set things up based on
- * the values I read on a Juno machine. We setup the BPR
- * with the various timeouts etc... as well based one
- * similarly captured values
- */
- if (p->is_pcie) {
- out_be32(p->regs + CAP_AER, 0x04000000);
- out_be32(p->regs + CAP_BPR, 0x0000004f);
- } else {
- out_be32(p->regs + CAP_AER, 0x84000000);
- out_be32(p->regs + CAP_BPR, 0x000f00ff);
- }
-
- /* XXX Setup error reporting registers */
-
- /* Clear errors in PLSSR and DMACSR */
- out_be32(p->regs + CAP_DMACSR, 0);
- out_be32(p->regs + CAP_PLSSR, 0);
-
- /* Configure MSIs on PCIe only */
- if (p->is_pcie) {
- /* XXX Check that setting ! That's what OPAL uses but
- * I suspect it might not be correct. We enable a masking
- * of 3 bits and no offset, which makes me think only
- * some MSIs will work... not 100% certain.
- */
- out_be32(p->regs + CAP_MVE0, CAP_MVE_VALID |
- SETFIELD(CAP_MVE_TBL_OFF, 0, 0) |
- SETFIELD(CAP_MVE_NUM_INT, 0, 0x3));
- out_be32(p->regs + CAP_MVE1, 0);
- }
-
- /* Configuration. We keep TCEs disabled */
- out_be32(p->regs + CAP_PCFGRW,
- CAP_PCFGRW_ERR_RECOV_EN |
- CAP_PCFGRW_FREEZE_EN |
- CAP_PCFGRW_DAC_DISABLE |
- (p->is_pcie ? CAP_PCFGRW_MSI_EN : 0));
-
- /* Re-enable address decode */
- phbid |= CA_PHBID_ADDRSPACE_ENABLE;
- out_be32(p->ca_regs + CA_PHBIDn(p->index), phbid);
-
- /* PCIe specific inits */
- if (p->is_pcie) {
- p5ioc2_phb_init_utl(p);
- p5ioc2_phb_init_pcie(p);
- }
-
- /* Take out reset pins on PCI-X. PCI-E will be handled via the hotplug
- * controller separately
- */
- if (!p->is_pcie) {
- uint32_t val;
-
- /* Setting 1's will deassert the reset signals */
- out_be32(p->regs + CAP_CRR, CAP_CRR_RESET1 | CAP_CRR_RESET2);
-
- /* Set max sub bus */
- p5ioc2_pcicfg_write8(&p->phb, 0, 0x41, 0xff);
-
- /* XXX SHPC stuff */
- printf("P5IOC2: SHPC Slots available 1 : %08x\n",
- in_be32(p->regs + 0xb20));
- printf("P5IOC2: SHPC Slots available 2 : %08x\n",
- in_be32(p->regs + 0xb24));
- printf("P5IOC2: SHPC Slots config : %08x\n",
- in_be32(p->regs + 0xb28));
- printf("P5IOC2: SHPC Secondary bus conf : %08x\n",
- in_be32(p->regs + 0xb2c));
-
- p5ioc2_pcicfg_read32(&p->phb, 0, 0, &val);
- printf("P5IOC2: val0: %08x\n", val);
- p5ioc2_pcicfg_read32(&p->phb, 0, 4, &val);
- printf("P5IOC2: val4: %08x\n", val);
- }
-
- /* Enable PCI command/status */
- p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_CMD, &pcicmd);
- pcicmd |= PCI_CFG_CMD_IO_EN | PCI_CFG_CMD_MEM_EN |
- PCI_CFG_CMD_BUS_MASTER_EN;
- p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_CMD, pcicmd);
-
- p->state = P5IOC2_PHB_STATE_FUNCTIONAL;
-}
-
-static void p5ioc2_pcie_add_node(struct p5ioc2_phb *p)
-{
- uint64_t reg[2], mmb, iob;
- uint32_t lsibase, icsp = get_ics_phandle();
- struct dt_node *np;
-
- reg[0] = cleanup_addr((uint64_t)p->regs);
- reg[1] = 0x1000;
-
- np = dt_new_addr(p->ioc->dt_node, "pciex", reg[0]);
- if (!np)
- return;
-
- p->phb.dt_node = np;
- dt_add_property_strings(np, "compatible", "ibm,p5ioc2-pciex");
- dt_add_property_strings(np, "device_type", "pciex");
- dt_add_property(np, "reg", reg, sizeof(reg));
- dt_add_property_cells(np, "#address-cells", 3);
- dt_add_property_cells(np, "#size-cells", 2);
- dt_add_property_cells(np, "#interrupt-cells", 1);
- dt_add_property_cells(np, "bus-range", 0, 0xff);
- dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
- dt_add_property_cells(np, "interrupt-parent", icsp);
- /* XXX FIXME: add phb own interrupts */
- dt_add_property_cells(np, "ibm,opal-num-pes", 1);
- dt_add_property_cells(np, "ibm,opal-msi-ranges", (p->buid << 4) + 5, 8);
- /* XXX FIXME: add slot-name */
- iob = cleanup_addr(p->io_base + IO_PCI_START);
- mmb = cleanup_addr(p->mm_base + MM_PCI_START);
- dt_add_property_cells(np, "ranges",
- /* IO space */
- 0x01000000, 0x00000000, 0x00000000,
- hi32(iob), lo32(iob), 0, IO_PCI_SIZE,
- /* M32 space */
- 0x02000000, 0x00000000, MM_PCI_START,
- hi32(mmb), lo32(mmb), 0, MM_PCI_SIZE);
-
- /* Add associativity properties */
- add_chip_dev_associativity(np);
-
- /* The interrupt maps will be generated in the RC node by the
- * PCI code based on the content of this structure:
- */
- lsibase = p->buid << 4;
- p->phb.lstate.int_size = 1;
- p->phb.lstate.int_val[0][0] = lsibase + 1;
- p->phb.lstate.int_val[1][0] = lsibase + 2;
- p->phb.lstate.int_val[2][0] = lsibase + 3;
- p->phb.lstate.int_val[3][0] = lsibase + 4;
- p->phb.lstate.int_parent[0] = icsp;
- p->phb.lstate.int_parent[1] = icsp;
- p->phb.lstate.int_parent[2] = icsp;
- p->phb.lstate.int_parent[3] = icsp;
-
- /* reset clear timestamp... to add if we do a reset and want
- * to avoid waiting in skiboot
- */
- //dt_property_cells("reset-clear-timestamp",....
-}
-
-static void p5ioc2_pcix_add_node(struct p5ioc2_phb *p)
-{
- uint64_t reg[2], mmb, iob;
- uint32_t lsibase, icsp = get_ics_phandle();
- struct dt_node *np;
-
- reg[0] = cleanup_addr((uint64_t)p->regs);
- reg[1] = 0x1000;
-
- np = dt_new_addr(p->ioc->dt_node, "pci", reg[0]);
- if (!np)
- return;
-
- p->phb.dt_node = np;
- dt_add_property_strings(np, "compatible", "ibm,p5ioc2-pcix");
- dt_add_property_strings(np, "device_type", "pci");
- dt_add_property(np, "reg", reg, sizeof(reg));
- dt_add_property_cells(np, "#address-cells", 3);
- dt_add_property_cells(np, "#size-cells", 2);
- dt_add_property_cells(np, "#interrupt-cells", 1);
- dt_add_property_cells(np, "bus-range", 0, 0xff);
- dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
- //dt_add_property_cells(np, "bus-width", 8); /* Figure out from VPD ? */
- dt_add_property_cells(np, "interrupt-parent", icsp);
- /* XXX FIXME: add phb own interrupts */
- dt_add_property_cells(np, "ibm,opal-num-pes", 1);
- /* XXX FIXME: add slot-name */
- iob = cleanup_addr(p->io_base + IO_PCI_START);
- mmb = cleanup_addr(p->mm_base + MM_PCI_START);
- dt_add_property_cells(np, "ranges",
- /* IO space */
- 0x01000000, 0x00000000, 0x00000000,
- hi32(iob), lo32(iob), 0, IO_PCI_SIZE,
- /* M32 space */
- 0x02000000, 0x00000000, MM_PCI_START,
- hi32(mmb), lo32(mmb), 0, MM_PCI_SIZE);
-
- /* Add associativity properties */
- add_chip_dev_associativity(np);
-
- /* The interrupt maps will be generated in the RC node by the
- * PCI code based on the content of this structure:
- */
- lsibase = p->buid << 4;
- p->phb.lstate.int_size = 1;
- p->phb.lstate.int_val[0][0] = lsibase + 1;
- p->phb.lstate.int_val[1][0] = lsibase + 2;
- p->phb.lstate.int_val[2][0] = lsibase + 3;
- p->phb.lstate.int_val[3][0] = lsibase + 4;
- p->phb.lstate.int_parent[0] = icsp;
- p->phb.lstate.int_parent[1] = icsp;
- p->phb.lstate.int_parent[2] = icsp;
- p->phb.lstate.int_parent[3] = icsp;
-
- /* On PCI-X we need to create an interrupt map here */
- pci_std_swizzle_irq_map(np, NULL, &p->phb.lstate, 0);
-}
-
-void p5ioc2_phb_setup(struct p5ioc2 *ioc, struct p5ioc2_phb *p,
- uint8_t ca, uint8_t index, bool active,
- uint32_t buid)
-{
- uint32_t phbid;
-
- p->index = index;
- p->ca = ca;
- p->ioc = ioc;
- p->active = active;
- p->phb.ops = &p5ioc2_phb_ops;
- p->buid = buid;
- p->ca_regs = ca ? ioc->ca1_regs : ioc->ca0_regs;
- p->regs = p->ca_regs + CA_PHBn_REGS(index);
-
- printf("P5IOC2: Initializing PHB %d on CA%d, regs @%p, BUID 0x%04x\n",
- p->index, p->ca, p->regs, p->buid);
-
- /* Memory map: described in p5ioc2.h */
- p->mm_base = ca ? ioc->ca1_mm_region : ioc->ca0_mm_region;
- p->mm_base += MM_WINDOW_SIZE * index;
- p->io_base = (uint64_t)p->ca_regs;
- p->io_base += IO_PCI_SIZE * (index + 1);
- p->state = P5IOC2_PHB_STATE_UNINITIALIZED;
-
- /* Query PHB type */
- phbid = in_be32(p->ca_regs + CA_PHBIDn(p->index));
-
- switch(GETFIELD(CA_PHBID_PHB_TYPE, phbid)) {
- case CA_PHBTYPE_PCIX1_0:
- p->is_pcie = false;
- p->phb.scan_map = 0x0003;
- p->phb.phb_type = phb_type_pcix_v1;
- printf("P5IOC2: PHB is PCI/PCI-X 1.0\n");
- break;
- case CA_PHBTYPE_PCIX2_0:
- p->is_pcie = false;
- p->phb.scan_map = 0x0003;
- p->phb.phb_type = phb_type_pcix_v2;
- printf("P5IOC2: PHB is PCI/PCI-X 2.0\n");
- break;
- case CA_PHBTYPE_PCIE_G1:
- p->is_pcie = true;
- p->phb.scan_map = 0x0001;
- p->phb.phb_type = phb_type_pcie_v1;
- printf("P5IOC2: PHB is PCI Express Gen 1\n");
- break;
- case CA_PHBTYPE_PCIE_G2:
- p->is_pcie = true;
- p->phb.scan_map = 0x0001;
- p->phb.phb_type = phb_type_pcie_v2;
- printf("P5IOC2: PHB is PCI Express Gen 2\n");
- break;
- default:
- printf("P5IOC2: Unknown PHB type ! phbid=%08x\n", phbid);
- p->is_pcie = true;
- p->phb.scan_map = 0x0001;
- p->phb.phb_type = phb_type_pcie_v1;
- }
-
- /* Find P5IOC2 base location code in IOC */
- p->phb.base_loc_code = dt_prop_get_def(ioc->dt_node,
- "ibm,io-base-loc-code", NULL);
- if (!p->phb.base_loc_code)
- prerror("P5IOC2: Base location code not found !\n");
-
- /* Add device nodes */
- if (p->is_pcie)
- p5ioc2_pcie_add_node(p);
- else
- p5ioc2_pcix_add_node(p);
-
- /* Initialize PHB HW */
- p5ioc2_phb_hwinit(p);
-
- /* Register all 16 interrupt sources for now as OS visible
- *
- * If we ever add some EEH, we might take out the error interrupts
- * and register them as OPAL internal interrupts instead
- */
- register_irq_source(&p5ioc2_phb_os_irq_ops, p, p->buid << 4, 16);
-
- /* We cannot query the PHB type yet as the registers aren't routed
- * so we'll do that in the inits, at which point we'll establish
- * the scan map
- */
-
- /* We register the PHB before we initialize it so we
- * get a useful OPAL ID for it
- */
- pci_register_phb(&p->phb);
-
- /* Platform additional setup */
- if (platform.pci_setup_phb)
- platform.pci_setup_phb(&p->phb, p->index);
-}
-
diff --git a/hw/p5ioc2.c b/hw/p5ioc2.c
deleted file mode 100644
index b0592d6..0000000
--- a/hw/p5ioc2.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <skiboot.h>
-#include <p5ioc2.h>
-#include <p5ioc2-regs.h>
-#include <cec.h>
-#include <gx.h>
-#include <opal.h>
-#include <interrupts.h>
-#include <device.h>
-#include <timebase.h>
-#include <vpd.h>
-#include <ccan/str/str.h>
-
-
-static int64_t p5ioc2_set_tce_mem(struct io_hub *hub, uint64_t address,
- uint64_t size)
-{
- struct p5ioc2 *ioc = iohub_to_p5ioc2(hub);
- int64_t rc;
-
- printf("P5IOC2: set_tce_mem(0x%016llx size 0x%llx)\n",
- address, size);
-
- /* The address passed must be naturally aligned */
- if (address && !is_pow2(size))
- return OPAL_PARAMETER;
- if (address & (size - 1))
- return OPAL_PARAMETER;
-
- ioc->tce_base = address;
- ioc->tce_size = size;
-
- rc = gx_configure_tce_bar(ioc->host_chip, ioc->gx_bus,
- address, size);
- if (rc)
- return OPAL_INTERNAL_ERROR;
- return OPAL_SUCCESS;
-}
-
-static int64_t p5ioc2_get_diag_data(struct io_hub *hub __unused,
- void *diag_buffer __unused,
- uint64_t diag_buffer_len __unused)
-{
- /* XXX Not yet implemented */
- return OPAL_UNSUPPORTED;
-}
-
-static const struct io_hub_ops p5ioc2_hub_ops = {
- .set_tce_mem = p5ioc2_set_tce_mem,
- .get_diag_data = p5ioc2_get_diag_data,
-};
-
-static void p5ioc2_inits(struct p5ioc2 *ioc)
-{
- uint64_t val;
- unsigned int p, n;
-
- printf("P5IOC2: Initializing hub...\n");
-
- /*
- * BML base inits
- */
- /* mask off interrupt presentation timeout in FIRMC */
- out_be64(ioc->regs + (P5IOC2_FIRMC | P5IOC2_REG_OR),
- 0x0000080000000000);
-
- /* turn off display alter mode */
- out_be64(ioc->regs + (P5IOC2_CTL | P5IOC2_REG_AND),
- 0xffffff7fffffffff);
-
- /* setup hub and clustering interrupts BUIDs to 1 and 2 */
- out_be64(ioc->regs + P5IOC2_SBUID, 0x0001000200000000);
-
- /* setup old style MSI BUID (should be unused but set it up anyway) */
- out_be32(ioc->regs + P5IOC2_BUCO, 0xf);
-
- /* Set XIXO bit 0 needed for "enhanced" TCEs or else TCE
- * fetches appear as normal memory reads on GX causing
- * P7 to checkstop when a TCE DKill collides with them.
- */
- out_be64(ioc->regs + P5IOC2_XIXO, in_be64(ioc->regs + P5IOC2_XIXO)
- | P5IOC2_XIXO_ENH_TCE);
-
- /* Clear routing tables */
- for (n = 0; n < 16; n++) {
- for (p = 0; p < 8; p++)
- out_be64(ioc->regs + P5IOC2_TxRTE(p,n), 0);
- }
- for (n = 0; n < 32; n++)
- out_be64(ioc->regs + P5IOC2_BUIDRTE(n), 0);
-
- /*
- * Setup routing. We use the same setup that pHyp appears
- * to do (after inspecting the various registers with SCOM)
- *
- * We assume the BARs are already setup by the FSP such
- * that BAR0 is 128G (8G region size) and BAR6 is
- * 256M (16M region size).
- *
- * The routing is based on what pHyp and BML do, each Calgary
- * get one slice of BAR6 and two slices of BAR0
- */
- /* BAR 0 segments 0 & 1 -> CA0 */
- out_be64(ioc->regs + P5IOC2_TxRTE(0,0),
- P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
- out_be64(ioc->regs + P5IOC2_TxRTE(0,1),
- P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
-
- /* BAR 0 segments 2 & 3 -> CA1 */
- out_be64(ioc->regs + P5IOC2_TxRTE(0,2),
- P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
- out_be64(ioc->regs + P5IOC2_TxRTE(0,3),
- P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
-
- /* BAR 6 segments 0 -> CA0 */
- out_be64(ioc->regs + P5IOC2_TxRTE(6,0),
- P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
-
- /* BAR 6 segments 1 -> CA0 */
- out_be64(ioc->regs + P5IOC2_TxRTE(6,1),
- P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
-
- /*
- * BUID routing, we send entries 1 to CA0 and 2 to CA1
- * just like pHyp and make sure the base and mask are
- * both clear in SID to we route the whole 512 block
- */
- val = in_be64(ioc->regs + P5IOC2_SID);
- val = SETFIELD(P5IOC2_SID_BUID_BASE, val, 0);
- val = SETFIELD(P5IOC2_SID_BUID_MASK, val, 0);
- out_be64(ioc->regs + P5IOC2_SID, val);
- out_be64(ioc->regs + P5IOC2_BUIDRTE(1),
- P5IOC2_BUIDRTE_VALID | P5IOC2_BUIDRTE_RR_RET |
- P5IOC2_CA0_RIO_ID);
- out_be64(ioc->regs + P5IOC2_BUIDRTE(2),
- P5IOC2_BUIDRTE_VALID | P5IOC2_BUIDRTE_RR_RET |
- P5IOC2_CA1_RIO_ID);
-}
-
-static void p5ioc2_ca_init(struct p5ioc2 *ioc, int ca)
-{
- void *regs = ca ? ioc->ca1_regs : ioc->ca0_regs;
- uint64_t val;
-
- printf("P5IOC2: Initializing Calgary %d...\n", ca);
-
- /* Setup device BUID */
- val = SETFIELD(CA_DEVBUID_MASK, 0ul, ca ? P5IOC2_CA1_BUID : P5IOC2_CA0_BUID);
- out_be32(regs + CA_DEVBUID, val);
-
- /* Setup HubID in TARm (and keep TCE clear, Linux will init that)
- *
- * BML and pHyp sets the values to 1 for CA0 and 4 for CA1. We
- * keep the TAR valid bit clear as well.
- */
- val = SETFIELD(CA_TAR_HUBID, 0ul, ca ? 4 : 1);
- val = SETFIELD(CA_TAR_ALTHUBID, val, ca ? 4 : 1);
- out_be64(regs + CA_TAR0, val);
- out_be64(regs + CA_TAR1, val);
- out_be64(regs + CA_TAR2, val);
- out_be64(regs + CA_TAR3, val);
-
- /* Bridge config register. We set it up to the same value as observed
- * under pHyp on a Juno machine. The difference from the IPL value is
- * that TCE buffers are enabled, discard timers are increased and
- * we disable response status to avoid errors.
- */
- //out_be64(regs + CA_CCR, 0x5045DDDED2000000);
- // disable memlimit:
- out_be64(regs + CA_CCR, 0x5005DDDED2000000);
-
- /* The system memory base/limit etc... setup will be done when the
- * user enables TCE via OPAL calls
- */
-}
-
-static void p5ioc2_create_hub(struct dt_node *np)
-{
- struct p5ioc2 *ioc;
- unsigned int i, id, irq;
- char *path;
-
- /* Use the BUID extension as ID and add it to device-tree */
- id = dt_prop_get_u32(np, "ibm,buid-ext");
- path = dt_get_path(np);
- printf("P5IOC2: Found at %s ID 0x%x\n", path, id);
- free(path);
- dt_add_property_cells(np, "ibm,opal-hubid", 0, id);
-
- /* Load VPD LID */
- vpd_preload(np);
- vpd_iohub_load(np);
-
- ioc = zalloc(sizeof(struct p5ioc2));
- if (!ioc)
- return;
- ioc->hub.hub_id = id;
- ioc->hub.ops = &p5ioc2_hub_ops;
- ioc->dt_node = np;
-
- /* We assume SBAR == GX0 + some hard coded offset */
- ioc->regs = (void *)dt_get_address(np, 0, NULL);
-
- /* For debugging... */
- for (i = 0; i < 8; i++)
- printf("P5IOC2: BAR%d = 0x%016llx M=0x%16llx\n", i,
- in_be64(ioc->regs + P5IOC2_BAR(i)),
- in_be64(ioc->regs + P5IOC2_BARM(i)));
-
- ioc->host_chip = dt_get_chip_id(np);
-
- ioc->gx_bus = dt_prop_get_u32(np, "ibm,gx-index");
-
- /* Rather than reading the BARs in P5IOC2, we "know" that
- * BAR6 matches GX BAR 1 and BAR0 matches GX BAR 2. This
- * is a bit fishy but will work for the few machines this
- * is intended to work on
- */
- ioc->bar6 = dt_prop_get_u64(np, "ibm,gx-bar-1");
- ioc->bar0 = dt_prop_get_u64(np, "ibm,gx-bar-2");
-
- printf("DT BAR6 = 0x%016llx\n", ioc->bar6);
- printf("DT BAR0 = 0x%016llx\n", ioc->bar0);
-
- /* We setup the corresponding Calgary register bases and memory
- * regions. Note: those cannot be used until the routing has
- * been setup by inits
- */
- ioc->ca0_regs = (void *)ioc->bar6 + P5IOC2_CA0_REG_OFFSET;
- ioc->ca1_regs = (void *)ioc->bar6 + P5IOC2_CA1_REG_OFFSET;
- ioc->ca0_mm_region = ioc->bar0 + P5IOC2_CA0_MM_OFFSET;
- ioc->ca1_mm_region = ioc->bar0 + P5IOC2_CA1_MM_OFFSET;
-
- /* Base of our BUIDs, will be refined later */
- ioc->buid_base = id << 9;
-
- /* Add interrupts: XXX These are the hub interrupts, we should add the
- * calgary ones as well... but we don't handle any of them currently
- * anyway.
- */
- irq = (ioc->buid_base + 1) << 4;
- dt_add_property_cells(np, "interrupts", irq, irq + 1);
- dt_add_property_cells(np, "interrupt-base", irq);
-
-
- /* Now, we do the bulk of the inits */
- p5ioc2_inits(ioc);
- p5ioc2_ca_init(ioc, 0);
- p5ioc2_ca_init(ioc, 1);
-
- /* So how do we know what PHBs to create ? Let's try all of them
- * and we'll see if that causes problems. TODO: Use VPD !
- */
- for (i = 0; i < 4; i++)
- p5ioc2_phb_setup(ioc, &ioc->ca0_phbs[i], 0, i, true,
- ioc->buid_base + P5IOC2_CA0_BUID + i + 1);
- for (i = 0; i < 4; i++)
- p5ioc2_phb_setup(ioc, &ioc->ca1_phbs[i], 1, i, true,
- ioc->buid_base + P5IOC2_CA1_BUID + i + 1);
-
- /* Reset delay... synchronous, hope we never do that as a
- * result of an OPAL callback. We shouldn't really need this
- * here and may fold it in the generic slot init sequence but
- * it's not like we care much about that p5ioc2 code...
- *
- * This is mostly to give devices a chance to settle after
- * having lifted the reset pin on PCI-X.
- */
- time_wait_ms(1000);
-
- printf("P5IOC2: Initialization complete\n");
-
- cec_register(&ioc->hub);
-}
-
-void probe_p5ioc2(void)
-{
- struct dt_node *np;
-
- dt_for_each_compatible(dt_root, np, "ibm,p5ioc2")
- p5ioc2_create_hub(np);
-}
-
diff --git a/hw/p7ioc-inits.c b/hw/p7ioc-inits.c
index dc5c370..0d50694 100644
--- a/hw/p7ioc-inits.c
+++ b/hw/p7ioc-inits.c
@@ -56,9 +56,9 @@ static void p7ioc_init_BI(struct p7ioc *ioc)
printf("P7IOC: -> Configured for P7+\n");
/* Chicken switches */
- REGW(0x3c00d8, 0x0004000000000600);
+ REGW(0x3c00d8, 0x0004000000000600UL);
/* GX config */
- REGW(0x3c00a0, 0x9F8929BE00880085);
+ REGW(0x3c00a0, 0x9F8929BE00880085UL);
} else {
printf("P7IOC: -> Configured for P7\n");
@@ -68,9 +68,9 @@ static void p7ioc_init_BI(struct p7ioc *ioc)
* xscom ?
*/
/* Chicken switches */
- REGW(0x3c00d8, 0x00040000000004C0);
+ REGW(0x3c00d8, 0x00040000000004C0UL);
/* GX config */
- REGW(0x3c00a0, 0x9C8929BE00880085);
+ REGW(0x3c00a0, 0x9C8929BE00880085UL);
}
/*
@@ -81,160 +81,160 @@ static void p7ioc_init_BI(struct p7ioc *ioc)
time_wait_ms(100);
/* Init_3: Upbound Credit Config */
- REGW(0x3c00c8, 0x0303060403030000);
+ REGW(0x3c00c8, 0x0303060403030000UL);
/* Init_4: Credit Init Timer */
- REGW(0x3c00e8, 0x00000000000000FF);
+ REGW(0x3c00e8, 0x00000000000000FFUL);
/* Init_4.1: BI Ack Timing */
- REGW(0x3c00e8, 0x0000FC0000000000);
+ REGW(0x3c00e8, 0x0000FC0000000000UL);
/* Init_5: Ordering Override 0*/
- REGW(0x3c0200, 0x0000000000000000);
+ REGW(0x3c0200, 0x0000000000000000UL);
/* Init_6: Ordering Override 1*/
- REGW(0x3c0208, 0x0000000000000000);
+ REGW(0x3c0208, 0x0000000000000000UL);
/*** Downbound TTYPE table ***/
/* Init_7: Enable sequence / speculation for CI Loads */
- REGW(0x3c00a8, 0x0000000000000004);
+ REGW(0x3c00a8, 0x0000000000000004UL);
/* Init_8: */
- REGW(0x3c00b0, 0x700800C000000000);
+ REGW(0x3c00b0, 0x700800C000000000UL);
/* Init_9: Enable sequence / speculation for CI Stores */
- REGW(0x3c00a8, 0x0000000000000005);
+ REGW(0x3c00a8, 0x0000000000000005UL);
/* Init_10: */
- REGW(0x3c00b0, 0x704820C000000000);
+ REGW(0x3c00b0, 0x704820C000000000UL);
/* Init_11: Enable speculation for EOI */
- REGW(0x3c00a8, 0x000000000000001B);
+ REGW(0x3c00a8, 0x000000000000001BUL);
/* Init_12: */
- REGW(0x3c00b0, 0x3590204000000000);
+ REGW(0x3c00b0, 0x3590204000000000UL);
/* Init_13: ENable speculation for DMA Rd Responses */
- REGW(0x3c00a8, 0x0000000000000020);
+ REGW(0x3c00a8, 0x0000000000000020UL);
/* Init_14: */
- REGW(0x3c00b0, 0x1103C4C000000000);
+ REGW(0x3c00b0, 0x1103C4C000000000UL);
/* Init_15: Enable sequence for DMA RWNITC */
- REGW(0x3c00a8, 0x0000000000000001);
+ REGW(0x3c00a8, 0x0000000000000001UL);
/* Init_16: */
- REGW(0x3c00b0, 0xC000000000000000);
+ REGW(0x3c00b0, 0xC000000000000000UL);
/* Init_17: Enable sequence for IOKill */
- REGW(0x3c00a8, 0x0000000000000009);
+ REGW(0x3c00a8, 0x0000000000000009UL);
/* Init_18: */
- REGW(0x3c00b0, 0x4208210000000000);
+ REGW(0x3c00b0, 0x4208210000000000UL);
/* Init_19: Enable sequence for IOKill */
- REGW(0x3c00a8, 0x000000000000000A);
+ REGW(0x3c00a8, 0x000000000000000AUL);
/* Init_20: */
- REGW(0x3c00b0, 0x4200210000000000);
+ REGW(0x3c00b0, 0x4200210000000000UL);
/* Init_21: Enable sequence for FMTC CI Store w/Kill */
- REGW(0x3c00a8, 0x0000000000000021);
+ REGW(0x3c00a8, 0x0000000000000021UL);
/*** Timer controls ***/
/* Init_22: */
- REGW(0x3c00b0, 0x4200300000000000);
+ REGW(0x3c00b0, 0x4200300000000000UL);
/* Init_23: Dnbound timer mask */
- REGW(0x3c0190, 0x0040000000000000);
+ REGW(0x3c0190, 0x0040000000000000UL);
/* Init_24: Upbound timer mask 0 */
- REGW(0x3c0180, 0x0010001000100010);
+ REGW(0x3c0180, 0x0010001000100010UL);
/* Init_25: Upbound timer mask 1 */
- REGW(0x3c0188, 0x0010000000000000);
+ REGW(0x3c0188, 0x0010000000000000UL);
/* Init_26: Credit sync check config */
- REGW(0x3c00f0, 0xC102000000000000);
+ REGW(0x3c00f0, 0xC102000000000000UL);
/*** Setup trace ***/
/* Init_27: DBG stop trace */
- REGW(0x3c0410, 0x4000000000000000);
+ REGW(0x3c0410, 0x4000000000000000UL);
/* Init_28: DBG control */
- REGW(0x3c0400, 0x0000000000000000);
+ REGW(0x3c0400, 0x0000000000000000UL);
/* Init_29: DBG Mode */
- REGW(0x3c0408, 0xA0000000F0CC3300);
+ REGW(0x3c0408, 0xA0000000F0CC3300UL);
/* Init_29a: DBG C0 (Stop on Error) */
- REGW(0x3c0418, 0xF4F00FFF00000000);
+ REGW(0x3c0418, 0xF4F00FFF00000000UL);
/* Init_30: DBG pre-mux select */
- REGW(0x3c0478, 0x0023000000000000);
+ REGW(0x3c0478, 0x0023000000000000UL);
/* Init_31: CA0 mode */
- REGW(0x3c04b0, 0x8000000000000000);
+ REGW(0x3c04b0, 0x8000000000000000UL);
/* Init_32: CA0 Compression 0 */
- REGW(0x3c04b8, 0x0000000000000000);
+ REGW(0x3c04b8, 0x0000000000000000UL);
/* Init_33: CA0 Compression 1 */
- REGW(0x3c04c0, 0x0000000000000000);
+ REGW(0x3c04c0, 0x0000000000000000UL);
 	/* Init_34: CA0 Pattern A match (cmd1 selected val) */
- REGW(0x3c0480, 0x008000007FFFFF00);
+ REGW(0x3c0480, 0x008000007FFFFF00UL);
/* Init_35: CA0 Trigger 0 definition (pattern A) */
- REGW(0x3c04a0, 0x8000000000000000);
+ REGW(0x3c04a0, 0x8000000000000000UL);
/* Init_36: CA1 mode */
- REGW(0x3c0530, 0x8000000000000000);
+ REGW(0x3c0530, 0x8000000000000000UL);
/* Init_37: CA1 Compression 0 */
- REGW(0x3c0538, 0x0000000000000000);
+ REGW(0x3c0538, 0x0000000000000000UL);
/* Init_38: CA1 Compression 1 */
- REGW(0x3c0540, 0x0000000000000000);
+ REGW(0x3c0540, 0x0000000000000000UL);
/* Init_39: CA2 mode */
- REGW(0x3c05b0, 0x8000000000000000);
+ REGW(0x3c05b0, 0x8000000000000000UL);
/* Init_40: CA2 Compression 0 */
- REGW(0x3c05b8, 0x0000000000000000);
+ REGW(0x3c05b8, 0x0000000000000000UL);
/* Init_41: CA2 Compression 1 */
- REGW(0x3c05c0, 0x0000000000000000);
+ REGW(0x3c05c0, 0x0000000000000000UL);
/* Init_42: CA3 Mode */
- REGW(0x3c0630, 0x8000000000000000);
+ REGW(0x3c0630, 0x8000000000000000UL);
/* Init_43: CA3 Compression 0 */
- REGW(0x3c0638, 0x0000000000000000);
+ REGW(0x3c0638, 0x0000000000000000UL);
/* Init_44: CA3 Compression 1 */
- REGW(0x3c0640, 0x0000000000000000);
+ REGW(0x3c0640, 0x0000000000000000UL);
/* Init_45: CA3 Pattern A match (AIB val) */
- REGW(0x3c0600, 0x80000100FFFEFF00);
+ REGW(0x3c0600, 0x80000100FFFEFF00UL);
/* Init_46: CA3 Trigger 0 definition (pattern A) */
- REGW(0x3c0620, 0x8000000000000000);
+ REGW(0x3c0620, 0x8000000000000000UL);
/* Init_47: DBG unfreeze trace */
- REGW(0x3c0410, 0x1000000000000000);
+ REGW(0x3c0410, 0x1000000000000000UL);
/* Init_48: DBG start trace */
- REGW(0x3c0410, 0x8000000000000000);
+ REGW(0x3c0410, 0x8000000000000000UL);
/*** AIB Port Config ***/
/* Init_49: AIB Port Information */
- REGW(0x3c00d0, 0x0888888800000000);
+ REGW(0x3c00d0, 0x0888888800000000UL);
/* Init_50: Port Ordering controls */
- REGW(0x3c0200, 0x0000000000000000);
+ REGW(0x3c0200, 0x0000000000000000UL);
/*** LEMs (need to match recov. tables) ***/
/* Init_51: Clear upbound LEM */
- REGW(0x3c0000, 0x0000000000000000);
+ REGW(0x3c0000, 0x0000000000000000UL);
/* Init_52: Clear upbound WOF */
- REGW(0x3c0040, 0x0000000000000000);
+ REGW(0x3c0040, 0x0000000000000000UL);
/* Init_53: Clear Dnbound LEM */
- REGW(0x3c0050, 0x0000000000000000);
+ REGW(0x3c0050, 0x0000000000000000UL);
/* Init_54: Clear Dnbound WOF */
- REGW(0x3c0090, 0x0000000000000000);
+ REGW(0x3c0090, 0x0000000000000000UL);
/* Init_55: Clear Fences */
- REGW(0x3c0130, 0x0000000000000000);
+ REGW(0x3c0130, 0x0000000000000000UL);
/* Init_56: Clear Erpt latches */
- REGW(0x3c0148, 0x0080000000000000);
+ REGW(0x3c0148, 0x0080000000000000UL);
/* Init_57: Set Upbound LEM Action0 */
- REGW(0x3c0030, 0x0800000000800000);
+ REGW(0x3c0030, 0x0800000000800000UL);
 	/* Init_58: Set Upbound LEM Action1 */
- REGW(0x3c0038, 0x0000000000000000);
+ REGW(0x3c0038, 0x0000000000000000UL);
/* Init_59: Set Upbound LEM Mask (AND write) */
- REGW(0x3c0020, 0x0800000000000000);
+ REGW(0x3c0020, 0x0800000000000000UL);
/* Init_60: Set Dnbound LEM Action0 */
- REGW(0x3c0080, 0x2000080CA07FFF40);
+ REGW(0x3c0080, 0x2000080CA07FFF40UL);
/* Init_61: Set Dnbound LEM Action1 */
- REGW(0x3c0088, 0x0000000000000000);
+ REGW(0x3c0088, 0x0000000000000000UL);
/* Init_62: Set Dnbound LEM Mask (AND write) */
- REGW(0x3c0070, 0x00000800200FFE00);
+ REGW(0x3c0070, 0x00000800200FFE00UL);
/*** Setup Fences (need to match recov. tables) ***/
/* Init_63: Set Upbound Damage Control 0 (GX Err) */
- REGW(0x3c0100, 0xF7FFFFFFFF7FFFFF);
+ REGW(0x3c0100, 0xF7FFFFFFFF7FFFFFUL);
/* Init_64: Set Upbound Damage Control 1 (AIB Fence) */
- REGW(0x3c0108, 0xF7FFFFFFFF7FFFFF);
+ REGW(0x3c0108, 0xF7FFFFFFFF7FFFFFUL);
/* Init_65: Set Upbound Damage Control 2 (Drop Pkt) */
- REGW(0x3c0110, 0x0010054000000000);
+ REGW(0x3c0110, 0x0010054000000000UL);
/* Init_66: Set Dnbound Damage Control 0 (GX Err) */
- REGW(0x3c0118, 0xDFFFF7F35F8000BF);
+ REGW(0x3c0118, 0xDFFFF7F35F8000BFUL);
/* Init_67: Set Dnbound Damage Control 1 (AIB Fence) */
- REGW(0x3c0120, 0xDFFFF7F35F8000BF);
+ REGW(0x3c0120, 0xDFFFF7F35F8000BFUL);
/* Init_68: Set Dnbound Damage Control 2 (Drop Pkt) */
- REGW(0x3c0128, 0x0000000C00000000);
+ REGW(0x3c0128, 0x0000000C00000000UL);
}
static void p7ioc_init_MISC_HSS(struct p7ioc *ioc)
@@ -255,11 +255,11 @@ static void p7ioc_init_MISC_HSS(struct p7ioc *ioc)
continue;
/* Init_1: HSSn CTL2 */
- REGW(regbase + P7IOC_HSSn_CTL2_OFFSET, 0xFFFF6DB6DB000000);
+ REGW(regbase + P7IOC_HSSn_CTL2_OFFSET, 0xFFFF6DB6DB000000UL);
/* Init_2: HSSn CTL3 */
- REGW(regbase + P7IOC_HSSn_CTL3_OFFSET, 0x1130000320000000);
+ REGW(regbase + P7IOC_HSSn_CTL3_OFFSET, 0x1130000320000000UL);
/* Init_3: HSSn CTL8 */
- REGW(regbase + P7IOC_HSSn_CTL8_OFFSET, 0xDDDDDDDD00000000);
+ REGW(regbase + P7IOC_HSSn_CTL8_OFFSET, 0xDDDDDDDD00000000UL);
#if 0 /* All these remain set to the values configured by the FSP */
/* Init_4: HSSn CTL9 */
@@ -316,27 +316,27 @@ static void p7ioc_init_RGC(struct p7ioc *ioc)
/*** Set LEM regs (needs to match recov. code) */
/* Init_3: LEM FIR Accumulator */
- REGW(0x3e1e00, 0x0000000000000000);
+ REGW(0x3e1e00, 0x0000000000000000UL);
/* Init_4: LEM Action 0 */
- REGW(0x3e1e30, 0x0FFF791F0B030000);
+ REGW(0x3e1e30, 0x0FFF791F0B030000UL);
 	/* Init_5: LEM Action 1 */
- REGW(0x3e1e38, 0x0000000000000000);
+ REGW(0x3e1e38, 0x0000000000000000UL);
/* Init_6: LEM WOF */
- REGW(0x3e1e40, 0x0000000000000000);
+ REGW(0x3e1e40, 0x0000000000000000UL);
/* Init_7: LEM Mask Reg (AND write) */
- REGW(0x3e1e20, 0x0FFF001F03030000);
+ REGW(0x3e1e20, 0x0FFF001F03030000UL);
/*** Set GEM regs (masks still on, no irpts can occur yet) ***/
/* Init_8: GEM XFIR */
- REGW(0x3e0008, 0x0000000000000000);
+ REGW(0x3e0008, 0x0000000000000000UL);
/* Init_9: GEM WOF */
- REGW(0x3e0028, 0x0000000000000000);
+ REGW(0x3e0028, 0x0000000000000000UL);
/*** Set Damage Controls (needs to match recov.) ***/
/* Init_10: LDCP */
- REGW(0x3e1c18, 0xF00086C0B4FCFFFF);
+ REGW(0x3e1c18, 0xF00086C0B4FCFFFFUL);
/*** Read status (optional) ***/
@@ -347,15 +347,15 @@ static void p7ioc_init_RGC(struct p7ioc *ioc)
/*** Set running configuration **/
/* Init_12: Configuration reg (modes, values, timers) */
- REGW(0x3e1c08, 0x10000077CE100000);
+ REGW(0x3e1c08, 0x10000077CE100000UL);
/* Init_13: Cmd/Dat Crd Allocation */
- REGW(0x3e1c20, 0x00000103000700FF);
+ REGW(0x3e1c20, 0x00000103000700FFUL);
/* Init_14: GP reg - disable errs, wrap, stop_trc */
- REGW(0x3e1018, 0x0000000000000000);
+ REGW(0x3e1018, 0x0000000000000000UL);
/* Init_15: Configuration reg (start init timers) */
cfg = REGR(0x3e1c08);
- REGW(0x3e1c08, cfg | 0x00003f0000000000);
+ REGW(0x3e1c08, cfg | 0x00003f0000000000UL);
/*** Setup interrupts ***/
@@ -386,29 +386,29 @@ static void p7ioc_init_RGC(struct p7ioc *ioc)
//REGR(0x3e1840, 0x0000000000000000);
/* Init_18: IODA Table Addr: Select IST*/
- REGW(0x3e1820, 0x8001000000000000);
+ REGW(0x3e1820, 0x8001000000000000UL);
/* Init_19: IODA Table Data: IRPT 0 */
- REGW(0x3e1830, 0x0000000000000000);
+ REGW(0x3e1830, 0x0000000000000000UL);
/* Init_20: IODA Table Data: IRPT 1 */
- REGW(0x3e1830, 0x0000000000000000);
+ REGW(0x3e1830, 0x0000000000000000UL);
/* Init_21: IODA Table Addr: Select HRT */
- REGW(0x3e1820, 0x8000000000000000);
+ REGW(0x3e1820, 0x8000000000000000UL);
/* Init_22: IODA Table Data: HRT
*
 	 * XXX Figure out what this actually is and what value we
 	 * should use. For now, do like BML and use 0
*/
for (i = 0; i < 4; i++)
- REGW(0x3e1830, 0x0000000000000000);
+ REGW(0x3e1830, 0x0000000000000000UL);
/* Init_23: IODA Table Addr: select XIVT */
- REGW(0x3e1820, 0x8002000000000000);
+ REGW(0x3e1820, 0x8002000000000000UL);
/* Init_24: IODA Table Data: Mask all interrupts */
for (i = 0; i < 16; i++)
- REGW(0x3e1830, 0x000000ff00000000);
+ REGW(0x3e1830, 0x000000ff00000000UL);
/* Init_25: Clear table lock if any was stale */
- REGW(0x3e1840, 0x0000000000000000);
+ REGW(0x3e1840, 0x0000000000000000UL);
/* Init_32..37: Set the PHB AIB addresses. We configure those
* to the values recommended in the p7IOC doc.
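The Init_18..25 sequence above uses the RGC's indirect IODA access: a select value written to the table-address register (0x3e1820) picks the table (IST, HRT or XIVT), and each subsequent write to the table-data register (0x3e1830) apparently fills the next entry in turn, which is why the loops keep writing the same data register. A hypothetical standalone sketch of that pattern, with regw() as a stand-in for REGW that only traces the accesses:

#include <stdint.h>
#include <stdio.h>

#define IODA_TABLE_ADDR	0x3e1820
#define IODA_TABLE_DATA	0x3e1830

/* Stand-in for REGW: just print what would be written. */
static void regw(uint64_t off, uint64_t val)
{
	printf("REGW(%#lx, %#018llx)\n", (unsigned long)off,
	       (unsigned long long)val);
}

int main(void)
{
	int i;

	/* Select the XIVT, then mask all 16 interrupt entries. */
	regw(IODA_TABLE_ADDR, 0x8002000000000000UL);
	for (i = 0; i < 16; i++)
		regw(IODA_TABLE_DATA, 0x000000ff00000000UL);
	return 0;
}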
@@ -536,8 +536,8 @@ static void p7ioc_init_ci_routing(struct p7ioc *ioc)
REGW(P7IOC_CI_RMATC_REG(i), rmatch[i]);
/* Init_225: CI Match 47 (Configure RGC catch all) */
- REGW(P7IOC_CI_RMASK_REG(47), 0x0000000000000000);
- REGW(P7IOC_CI_RMATC_REG(47), 0x4000800000000000);
+ REGW(P7IOC_CI_RMASK_REG(47), 0x0000000000000000UL);
+ REGW(P7IOC_CI_RMATC_REG(47), 0x4000800000000000UL);
#ifdef DUMP_CI_ROUTING
printf("P7IOC: CI Routing table:\n");
@@ -560,189 +560,189 @@ static void p7ioc_init_CI(struct p7ioc *ioc)
*/
/* Init_1: CI Port 0 Configuration */
- REGW(0x3d0000, 0x420000C0073F0002);
+ REGW(0x3d0000, 0x420000C0073F0002UL);
/* Init_2: CI Port 0 Configuration */
- REGW(0x3d0000, 0x020000C0073F0002);
+ REGW(0x3d0000, 0x020000C0073F0002UL);
/* Init_3: CI Port 1 Configuration */
- REGW(0x3d1000, 0x42000FCF07200002);
+ REGW(0x3d1000, 0x42000FCF07200002UL);
/* Init_4: CI Port 1 Configuration */
- REGW(0x3d1000, 0x02000FCF07200002);
+ REGW(0x3d1000, 0x02000FCF07200002UL);
/* Init_5: CI Port 2 Configuration */
- REGW(0x3d2000, 0x420000C307200002);
+ REGW(0x3d2000, 0x420000C307200002UL);
/* Init_6: CI Port 2 Configuration */
- REGW(0x3d2000, 0x020000C307200002);
+ REGW(0x3d2000, 0x020000C307200002UL);
/* Init_7: CI Port 3 Configuration */
- REGW(0x3d3000, 0x420000C307200002);
+ REGW(0x3d3000, 0x420000C307200002UL);
/* Init_8: CI Port 3 Configuration */
- REGW(0x3d3000, 0x020000C307200002);
+ REGW(0x3d3000, 0x020000C307200002UL);
/* Init_9: CI Port 4 Configuration */
- REGW(0x3d4000, 0x420000C307200002);
+ REGW(0x3d4000, 0x420000C307200002UL);
/* Init_10: CI Port 4 Configuration */
- REGW(0x3d4000, 0x020000C307200002);
+ REGW(0x3d4000, 0x020000C307200002UL);
/* Init_11: CI Port 5 Configuration */
- REGW(0x3d5000, 0x420000C307200002);
+ REGW(0x3d5000, 0x420000C307200002UL);
/* Init_12: CI Port 5 Configuration */
- REGW(0x3d5000, 0x020000C307200002);
+ REGW(0x3d5000, 0x020000C307200002UL);
/* Init_13: CI Port 6 Configuration */
- REGW(0x3d6000, 0x420000C307200002);
+ REGW(0x3d6000, 0x420000C307200002UL);
/* Init_14: CI Port 6 Configuration */
- REGW(0x3d6000, 0x020000C307200002);
+ REGW(0x3d6000, 0x020000C307200002UL);
/* Init_15: CI Port 7 Configuration */
- REGW(0x3d7000, 0x420000C307200002);
+ REGW(0x3d7000, 0x420000C307200002UL);
/* Init_16: CI Port 7 Configuration */
- REGW(0x3d7000, 0x020000C307200002);
+ REGW(0x3d7000, 0x020000C307200002UL);
/*** Set LEM regs (need to match recov.) ***/
/* Init_17: CI Port 0 LEM FIR Accumulator */
- REGW(0x3d0200, 0x0000000000000000);
+ REGW(0x3d0200, 0x0000000000000000UL);
/* Init_18: CI Port 0 LEM Action 0 */
- REGW(0x3d0230, 0x0A00000000000000);
+ REGW(0x3d0230, 0x0A00000000000000UL);
/* Init_19: CI Port 0 LEM Action 1 */
- REGW(0x3d0238, 0x0000000000000000);
+ REGW(0x3d0238, 0x0000000000000000UL);
/* Init_20: CI Port 0 LEM WOF */
- REGW(0x3d0240, 0x0000000000000000);
+ REGW(0x3d0240, 0x0000000000000000UL);
/* Init_21: CI Port 0 LEM Mask (AND write) */
- REGW(0x3d0220, 0x0200000000000000);
+ REGW(0x3d0220, 0x0200000000000000UL);
 	/* Init_22: CI Port 1 LEM FIR Accumulator */
- REGW(0x3d1200, 0x0000000000000000);
+ REGW(0x3d1200, 0x0000000000000000UL);
/* Init_23: CI Port 1 LEM Action 0 */
- REGW(0x3d1230, 0x0000000000000000);
+ REGW(0x3d1230, 0x0000000000000000UL);
/* Init_24: CI Port 1 LEM Action 1 */
- REGW(0x3d1238, 0x0000000000000000);
+ REGW(0x3d1238, 0x0000000000000000UL);
/* Init_25: CI Port 1 LEM WOF */
- REGW(0x3d1240, 0x0000000000000000);
+ REGW(0x3d1240, 0x0000000000000000UL);
/* Init_26: CI Port 1 LEM Mask (AND write) */
- REGW(0x3d1220, 0x0000000000000000);
+ REGW(0x3d1220, 0x0000000000000000UL);
/* Init_27: CI Port 2 LEM FIR Accumulator */
- REGW(0x3d2200, 0x0000000000000000);
+ REGW(0x3d2200, 0x0000000000000000UL);
/* Init_28: CI Port 2 LEM Action 0 */
- REGW(0x3d2230, 0xA4F4000000000000);
+ REGW(0x3d2230, 0xA4F4000000000000UL);
/* Init_29: CI Port 2 LEM Action 1 */
- REGW(0x3d2238, 0x0000000000000000);
+ REGW(0x3d2238, 0x0000000000000000UL);
/* Init_30: CI Port 2 LEM WOF */
- REGW(0x3d2240, 0x0000000000000000);
+ REGW(0x3d2240, 0x0000000000000000UL);
/* Init_31: CI Port 2 LEM Mask (AND write) */
- REGW(0x3d2220, 0x0000000000000000);
+ REGW(0x3d2220, 0x0000000000000000UL);
/* Init_32: CI Port 3 LEM FIR Accumulator */
- REGW(0x3d3200, 0x0000000000000000);
+ REGW(0x3d3200, 0x0000000000000000UL);
/* Init_33: CI Port 3 LEM Action 0 */
- REGW(0x3d3230, 0xA4F4000000000000);
+ REGW(0x3d3230, 0xA4F4000000000000UL);
/* Init_34: CI Port 3 LEM Action 1 */
- REGW(0x3d3238, 0x0000000000000000);
+ REGW(0x3d3238, 0x0000000000000000UL);
/* Init_35: CI Port 3 LEM WOF */
- REGW(0x3d3240, 0x0000000000000000);
+ REGW(0x3d3240, 0x0000000000000000UL);
/* Init_36: CI Port 3 LEM Mask (AND write) */
- REGW(0x3d3220, 0x0000000000000000);
+ REGW(0x3d3220, 0x0000000000000000UL);
/* Init_37: CI Port 4 LEM FIR Accumulator */
- REGW(0x3d4200, 0x0000000000000000);
+ REGW(0x3d4200, 0x0000000000000000UL);
/* Init_38: CI Port 4 Action 0 */
- REGW(0x3d4230, 0xA4F4000000000000);
+ REGW(0x3d4230, 0xA4F4000000000000UL);
/* Init_39: CI Port 4 Action 1 */
- REGW(0x3d4238, 0x0000000000000000);
+ REGW(0x3d4238, 0x0000000000000000UL);
/* Init_40: CI Port 4 WOF */
- REGW(0x3d4240, 0x0000000000000000);
+ REGW(0x3d4240, 0x0000000000000000UL);
/* Init_41: CI Port 4 Mask (AND write) */
- REGW(0x3d4220, 0x0000000000000000);
+ REGW(0x3d4220, 0x0000000000000000UL);
/* Init_42: CI Port 5 LEM FIR Accumulator */
- REGW(0x3d5200, 0x0000000000000000);
+ REGW(0x3d5200, 0x0000000000000000UL);
/* Init_43: CI Port 5 Action 0 */
- REGW(0x3d5230, 0xA4F4000000000000);
+ REGW(0x3d5230, 0xA4F4000000000000UL);
/* Init_44: CI Port 5 Action 1 */
- REGW(0x3d5238, 0x0000000000000000);
+ REGW(0x3d5238, 0x0000000000000000UL);
 	/* Init_45: CI Port 5 WOF */
- REGW(0x3d5240, 0x0000000000000000);
+ REGW(0x3d5240, 0x0000000000000000UL);
/* Init_46: CI Port 5 Mask (AND write) */
- REGW(0x3d5220, 0x0000000000000000);
+ REGW(0x3d5220, 0x0000000000000000UL);
/* Init_47: CI Port 6 LEM FIR Accumulator */
- REGW(0x3d6200, 0x0000000000000000);
+ REGW(0x3d6200, 0x0000000000000000UL);
/* Init_48: CI Port 6 Action 0 */
- REGW(0x3d6230, 0xA4F4000000000000);
+ REGW(0x3d6230, 0xA4F4000000000000UL);
/* Init_49: CI Port 6 Action 1 */
- REGW(0x3d6238, 0x0000000000000000);
+ REGW(0x3d6238, 0x0000000000000000UL);
/* Init_50: CI Port 6 WOF */
- REGW(0x3d6240, 0x0000000000000000);
+ REGW(0x3d6240, 0x0000000000000000UL);
/* Init_51: CI Port 6 Mask (AND write) */
- REGW(0x3d6220, 0x0000000000000000);
+ REGW(0x3d6220, 0x0000000000000000UL);
/* Init_52: CI Port 7 LEM FIR Accumulator */
- REGW(0x3d7200, 0x0000000000000000);
+ REGW(0x3d7200, 0x0000000000000000UL);
/* Init_53: CI Port 7 Action 0 */
- REGW(0x3d7230, 0xA4F4000000000000);
+ REGW(0x3d7230, 0xA4F4000000000000UL);
/* Init_54: CI Port 7 Action 1 */
- REGW(0x3d7238, 0x0000000000000000);
+ REGW(0x3d7238, 0x0000000000000000UL);
/* Init_55: CI Port 7 WOF */
- REGW(0x3d7240, 0x0000000000000000);
+ REGW(0x3d7240, 0x0000000000000000UL);
/* Init_56: CI Port 7 Mask (AND write) */
- REGW(0x3d7220, 0x0000000000000000);
+ REGW(0x3d7220, 0x0000000000000000UL);
 	/*** Set Damage Controls (need to match recov.) ***/
/* Init_57: CI Port 0 LDCP*/
- REGW(0x3d0010, 0x421A0000000075FF);
+ REGW(0x3d0010, 0x421A0000000075FFUL);
/* Init_58: CI Port 1 LDCP */
- REGW(0x3d1010, 0x421A000000007FFF);
+ REGW(0x3d1010, 0x421A000000007FFFUL);
/* Init_59: CI Port 2 LDCP */
- REGW(0x3d2010, 0x421A24F400005B0B);
+ REGW(0x3d2010, 0x421A24F400005B0BUL);
/* Init_60: CI Port 3 LDCP */
- REGW(0x3d3010, 0x421A24F400005B0B);
+ REGW(0x3d3010, 0x421A24F400005B0BUL);
/* Init_61: CI Port 4 LDCP */
- REGW(0x3d4010, 0x421A24F400005B0B);
+ REGW(0x3d4010, 0x421A24F400005B0BUL);
/* Init_62: CI Port 5 LDCP */
- REGW(0x3d5010, 0x421A24F400005B0B);
+ REGW(0x3d5010, 0x421A24F400005B0BUL);
/* Init_63: CI Port 6 LDCP */
- REGW(0x3d6010, 0x421A24F400005B0B);
+ REGW(0x3d6010, 0x421A24F400005B0BUL);
/* Init_64: CI Port 7 LDCP */
- REGW(0x3d7010, 0x421A24F400005B0B);
+ REGW(0x3d7010, 0x421A24F400005B0BUL);
/*** Setup Trace 0 ***/
/* Init_65: CI Trc 0 DBG - Run/Status (stop trace) */
- REGW(0x3d0810, 0x5000000000000000);
+ REGW(0x3d0810, 0x5000000000000000UL);
/* Init_66: CI Trc 0 DBG - Mode (not cross trig CA's) */
- REGW(0x3d0808, 0xB0000000F0000000);
+ REGW(0x3d0808, 0xB0000000F0000000UL);
/* Init_66a: CI Trc 0 DBG - C0 (stop on error) */
- REGW(0x3d0818, 0xF4F00FFF00000000);
+ REGW(0x3d0818, 0xF4F00FFF00000000UL);
/* Init_67: CI Trc 0 DBG - Select (port 0 mode 2) */
- REGW(0x3d0878, 0x0002000000000000);
+ REGW(0x3d0878, 0x0002000000000000UL);
/* Init_68: CI Trc 0 CA0 - Pattern A (RX cmd val) */
- REGW(0x3d0880, 0xC0200000DFFFFF00);
+ REGW(0x3d0880, 0xC0200000DFFFFF00UL);
/* Init_69: CI Trc 0 CA0 - Trigger 0 (Pattern A) */
- REGW(0x3d08a0, 0x8000000000000000);
+ REGW(0x3d08a0, 0x8000000000000000UL);
/* Init_70: CI Trc 0 - Mode */
- REGW(0x3d08b0, 0x8000000000000000);
+ REGW(0x3d08b0, 0x8000000000000000UL);
/* Init_71: CI Trc 0 CA1 - Pattern A (TX cmd val) */
- REGW(0x3d0900, 0xC0200000DFFFFF00);
+ REGW(0x3d0900, 0xC0200000DFFFFF00UL);
/* Init_72: CI Trc 0 CA1 - Trigger 0 (Pattern A) */
- REGW(0x3d0920, 0x8000000000000000);
+ REGW(0x3d0920, 0x8000000000000000UL);
/* Init_73: CI Trc 0 CA1 - Mode */
- REGW(0x3d0930, 0x8000000000000000);
+ REGW(0x3d0930, 0x8000000000000000UL);
/* Init_74: CI Trc 0 DBG - Run/Status (start trace) */
- REGW(0x3d0810, 0x8000000000000000);
+ REGW(0x3d0810, 0x8000000000000000UL);
/*** Setup Trace 1 ***/
/* Init_75: CI Trc 1 DBG - Run/Status (stop trace) */
- REGW(0x3d0c10, 0x5000000000000000);
+ REGW(0x3d0c10, 0x5000000000000000UL);
/* Init_76: CI Trc 1 DBG - Mode (not cross trig CA's) */
- REGW(0x3d0c08, 0xB0000000F0000000);
+ REGW(0x3d0c08, 0xB0000000F0000000UL);
/* Init_76a: CI Trc 1 DBG - C0 (stop on error) */
- REGW(0x3d0c18, 0xF4F00FFF00000000);
+ REGW(0x3d0c18, 0xF4F00FFF00000000UL);
/* Init_77: CI Trc 1 DBG - Select (port 1 mode 2) */
- REGW(0x3d0c78, 0x0102000000000000);
+ REGW(0x3d0c78, 0x0102000000000000UL);
/* Init_78: CI Trc 1 CA0 - Pattern A (RX cmd val) */
- REGW(0x3d0c80, 0xC0200000DFFFFF00);
+ REGW(0x3d0c80, 0xC0200000DFFFFF00UL);
/* Init_79: CI Trc 1 CA0 - Trigger 0 (Pattern A) */
- REGW(0x3d0ca0, 0x8000000000000000);
+ REGW(0x3d0ca0, 0x8000000000000000UL);
/* Init_80: CI Trc 1 CA0 - Mode */
- REGW(0x3d0cb0, 0x8000000000000000);
+ REGW(0x3d0cb0, 0x8000000000000000UL);
/* Init_81: CI Trc 1 CA1 - Pattern A (TX cmd val) */
- REGW(0x3d0d00, 0xC0200000DFFFFF00);
+ REGW(0x3d0d00, 0xC0200000DFFFFF00UL);
/* Init_82: CI Trc 1 CA1 - Trigger 0 (Pattern A) */
- REGW(0x3d0d20, 0x8000000000000000);
+ REGW(0x3d0d20, 0x8000000000000000UL);
/* Init_83: CI Trc 1 CA1 - Mode */
- REGW(0x3d0d30, 0x8000000000000000);
+ REGW(0x3d0d30, 0x8000000000000000UL);
/* Init_84: CI Trc 1 DBG - Run/Status (start trace) */
- REGW(0x3d0c10, 0x8000000000000000);
+ REGW(0x3d0c10, 0x8000000000000000UL);
/* Init_85...92:
*
@@ -756,82 +756,82 @@ static void p7ioc_init_CI(struct p7ioc *ioc)
/*** Set buffer allocations (credits) ***/
/* Init_93: CI Port 0 Rx Cmd Buffer Allocation */
- REGW(0x3d0050, 0x0808040400000000);
+ REGW(0x3d0050, 0x0808040400000000UL);
/* Init_94: CI Port 0 Rx Dat Buffer Allocation */
- REGW(0x3d0060, 0x0006000200000000);
+ REGW(0x3d0060, 0x0006000200000000UL);
/* Init_95: CI Port 1 Tx Cmd Buffer Allocation */
- REGW(0x3d1030, 0x0000040400000000);
+ REGW(0x3d1030, 0x0000040400000000UL);
/* Init_96: CI Port 1 Tx Dat Buffer Allocation */
- REGW(0x3d1040, 0x0000004800000000);
+ REGW(0x3d1040, 0x0000004800000000UL);
/* Init_97: CI Port 1 Rx Cmd Buffer Allocation */
- REGW(0x3d1050, 0x0008000000000000);
+ REGW(0x3d1050, 0x0008000000000000UL);
/* Init_98: CI Port 1 Rx Dat Buffer Allocation */
- REGW(0x3d1060, 0x0048000000000000);
+ REGW(0x3d1060, 0x0048000000000000UL);
/* Init_99: CI Port 2 Tx Cmd Buffer Allocation */
- REGW(0x3d2030, 0x0808080800000000);
+ REGW(0x3d2030, 0x0808080800000000UL);
/* Init_100: CI Port 2 Tx Dat Buffer Allocation */
- REGW(0x3d2040, 0x0086008200000000);
+ REGW(0x3d2040, 0x0086008200000000UL);
/* Init_101: CI Port 2 Rx Cmd Buffer Allocation */
- REGW(0x3d2050, 0x0808080800000000);
+ REGW(0x3d2050, 0x0808080800000000UL);
/* Init_102: CI Port 2 Rx Dat Buffer Allocation */
- REGW(0x3d2060, 0x8648000000000000);
+ REGW(0x3d2060, 0x8648000000000000UL);
/* Init_103: CI Port 3 Tx Cmd Buffer Allocation */
- REGW(0x3d3030, 0x0808080800000000);
+ REGW(0x3d3030, 0x0808080800000000UL);
/* Init_104: CI Port 3 Tx Dat Buffer Allocation */
- REGW(0x3d3040, 0x0086008200000000);
+ REGW(0x3d3040, 0x0086008200000000UL);
/* Init_105: CI Port 3 Rx Cmd Buffer Allocation */
- REGW(0x3d3050, 0x0808080800000000);
+ REGW(0x3d3050, 0x0808080800000000UL);
/* Init_106: CI Port 3 Rx Dat Buffer Allocation */
- REGW(0x3d3060, 0x8648000000000000);
+ REGW(0x3d3060, 0x8648000000000000UL);
/* Init_107: CI Port 4 Tx Cmd Buffer Allocation */
- REGW(0x3d4030, 0x0808080800000000);
+ REGW(0x3d4030, 0x0808080800000000UL);
/* Init_108: CI Port 4 Tx Dat Buffer Allocation */
- REGW(0x3d4040, 0x0086008200000000);
+ REGW(0x3d4040, 0x0086008200000000UL);
/* Init_109: CI Port 4 Rx Cmd Buffer Allocation */
- REGW(0x3d4050, 0x0808080800000000);
+ REGW(0x3d4050, 0x0808080800000000UL);
/* Init_110: CI Port 4 Rx Dat Buffer Allocation */
- REGW(0x3d4060, 0x8648000000000000);
+ REGW(0x3d4060, 0x8648000000000000UL);
/* Init_111: CI Port 5 Tx Cmd Buffer Allocation */
- REGW(0x3d5030, 0x0808080800000000);
+ REGW(0x3d5030, 0x0808080800000000UL);
/* Init_112: CI Port 5 Tx Dat Buffer Allocation */
- REGW(0x3d5040, 0x0086008200000000);
+ REGW(0x3d5040, 0x0086008200000000UL);
/* Init_113: CI Port 5 Rx Cmd Buffer Allocation */
- REGW(0x3d5050, 0x0808080800000000);
+ REGW(0x3d5050, 0x0808080800000000UL);
/* Init_114: CI Port 5 Rx Dat Buffer Allocation */
- REGW(0x3d5060, 0x8648000000000000);
+ REGW(0x3d5060, 0x8648000000000000UL);
/* Init_115: CI Port 6 Tx Cmd Buffer Allocation */
- REGW(0x3d6030, 0x0808080800000000);
+ REGW(0x3d6030, 0x0808080800000000UL);
/* Init_116: CI Port 6 Tx Dat Buffer Allocation */
- REGW(0x3d6040, 0x0086008200000000);
+ REGW(0x3d6040, 0x0086008200000000UL);
/* Init_117: CI Port 6 Rx Cmd Buffer Allocation */
- REGW(0x3d6050, 0x0808080800000000);
+ REGW(0x3d6050, 0x0808080800000000UL);
/* Init_118: CI Port 6 Rx Dat Buffer Allocation */
- REGW(0x3d6060, 0x8648000000000000);
+ REGW(0x3d6060, 0x8648000000000000UL);
/* Init_119: CI Port 7 Tx Cmd Buffer Allocation */
- REGW(0x3d7030, 0x0808080800000000);
+ REGW(0x3d7030, 0x0808080800000000UL);
/* Init_120: CI Port 7 Tx Dat Buffer Allocation */
- REGW(0x3d7040, 0x0086008200000000);
+ REGW(0x3d7040, 0x0086008200000000UL);
/* Init_121: CI Port 7 Rx Cmd Buffer Allocation */
- REGW(0x3d7050, 0x0808080800000000);
+ REGW(0x3d7050, 0x0808080800000000UL);
 	/* Init_122: CI Port 7 Rx Dat Buffer Allocation */
- REGW(0x3d7060, 0x8648000000000000);
+ REGW(0x3d7060, 0x8648000000000000UL);
/*** Channel ordering ***/
/* Init_123: CI Port 1 Ordering */
- REGW(0x3d1070, 0x73D0735E00000000);
+ REGW(0x3d1070, 0x73D0735E00000000UL);
/* Init_124: CI Port 2 Ordering */
- REGW(0x3d2070, 0x73D0735E00000000);
+ REGW(0x3d2070, 0x73D0735E00000000UL);
/* Init_125: CI Port 3 Ordering */
- REGW(0x3d3070, 0x73D0735E00000000);
+ REGW(0x3d3070, 0x73D0735E00000000UL);
/* Init_126: CI Port 4 Ordering */
- REGW(0x3d4070, 0x73D0735E00000000);
+ REGW(0x3d4070, 0x73D0735E00000000UL);
/* Init_127: CI Port 5 Ordering */
- REGW(0x3d5070, 0x73D0735E00000000);
+ REGW(0x3d5070, 0x73D0735E00000000UL);
/* Init_128: CI Port 6 Ordering */
- REGW(0x3d6070, 0x73D0735E00000000);
+ REGW(0x3d6070, 0x73D0735E00000000UL);
 	/* Init_129: CI Port 7 Ordering */
- REGW(0x3d7070, 0x73D0735E00000000);
+ REGW(0x3d7070, 0x73D0735E00000000UL);
/*** Setup routing (port 0 only) */
@@ -843,21 +843,21 @@ static void p7ioc_init_CI(struct p7ioc *ioc)
*/
/* Init_226: CI Port 1 Configuration */
- REGW(0x3d1000, 0x023F0FCF07200002);
+ REGW(0x3d1000, 0x023F0FCF07200002UL);
/* Init_227: CI Port 2 Configuration */
- REGW(0x3d2000, 0x023F00C307200002);
+ REGW(0x3d2000, 0x023F00C307200002UL);
/* Init_228: CI Port 3 Configuration */
- REGW(0x3d3000, 0x023F00C307200002);
+ REGW(0x3d3000, 0x023F00C307200002UL);
/* Init_229: CI Port 4 Configuration */
- REGW(0x3d4000, 0x023F00C307200002);
+ REGW(0x3d4000, 0x023F00C307200002UL);
/* Init_230: CI Port 5 Configuration */
- REGW(0x3d5000, 0x023F00C307200002);
+ REGW(0x3d5000, 0x023F00C307200002UL);
/* Init_231: CI Port 6 Configuration */
- REGW(0x3d6000, 0x023F00C307200002);
+ REGW(0x3d6000, 0x023F00C307200002UL);
/* Init_232: CI Port 7 Configuration */
- REGW(0x3d7000, 0x023F00C307200002);
+ REGW(0x3d7000, 0x023F00C307200002UL);
/* Init_233: CI Port 0 Configuration */
- REGW(0x3d0000, 0x023F00C0073F0002);
+ REGW(0x3d0000, 0x023F00C0073F0002UL);
}
static void p7ioc_init_PHBs(struct p7ioc *ioc)
@@ -882,30 +882,30 @@ static void p7ioc_init_MISC(struct p7ioc *ioc)
/*** Set LEM regs ***/
/* Init_1: LEM FIR Accumulator */
- REGW(0x3ea000, 0x0000000000000000);
+ REGW(0x3ea000, 0x0000000000000000UL);
/* Init_2: LEM Action 0 */
- REGW(0x3ea030, 0xFFFFFFFCEE3FFFFF);
+ REGW(0x3ea030, 0xFFFFFFFCEE3FFFFFUL);
/* Init_3: LEM Action 1 */
- REGW(0x3ea038, 0x0000000001C00000);
+ REGW(0x3ea038, 0x0000000001C00000UL);
/* Init_4: LEM WOF */
- REGW(0x3ea040, 0x0000000000000000);
+ REGW(0x3ea040, 0x0000000000000000UL);
/* Init_5: LEM Mask (AND write) */
- REGW(0x3ea020, 0x000F03F0CD3FFFFF);
+ REGW(0x3ea020, 0x000F03F0CD3FFFFFUL);
/* Init_5.1: I2C LEM FIR Accumulator */
- REGW(0x3eb000, 0x0000000000000000);
+ REGW(0x3eb000, 0x0000000000000000UL);
/* Init_5.2: I2C LEM Action 0 */
- REGW(0x3eb030, 0xEE00000000000000);
+ REGW(0x3eb030, 0xEE00000000000000UL);
/* Init_5.3: I2C LEM Action 1 */
- REGW(0x3eb038, 0x0000000000000000);
+ REGW(0x3eb038, 0x0000000000000000UL);
/* Init_5.4: I2C LEM WOF */
- REGW(0x3eb040, 0x0000000000000000);
+ REGW(0x3eb040, 0x0000000000000000UL);
/* Init_5.5: I2C LEM Mask (AND write) */
- REGW(0x3eb020, 0x4600000000000000);
+ REGW(0x3eb020, 0x4600000000000000UL);
/*** Set RGC GP bits (error enables) ***/
/* Init_7: RGC GP0 control (enable umux errors) */
- REGW(0x3e1018, 0x8888880000000000);
+ REGW(0x3e1018, 0x8888880000000000ULL);
/*** Central Trace Setup ***
*
@@ -914,120 +914,120 @@ static void p7ioc_init_MISC(struct p7ioc *ioc)
*/
/* Init_8: */
- REGW(0x3ea810, 0x5000000000000000);
+ REGW(0x3ea810, 0x5000000000000000UL);
/* Init_9: */
- REGW(0x3ea800, 0x0000000000000000);
+ REGW(0x3ea800, 0x0000000000000000UL);
/* Init_10: */
- REGW(0x3ea808, 0xB0000000F0000000);
+ REGW(0x3ea808, 0xB0000000F0000000UL);
/* Init_11: */
- REGW(0x3ea818, 0xF4F00FFF00000000);
+ REGW(0x3ea818, 0xF4F00FFF00000000UL);
/* Init_12: */
- REGW(0x3ea820, 0x0000000000000000);
+ REGW(0x3ea820, 0x0000000000000000UL);
/* Init_13: */
- REGW(0x3ea828, 0x0000000000000000);
+ REGW(0x3ea828, 0x0000000000000000UL);
/* Init_14: */
- REGW(0x3ea830, 0x0000000000000000);
+ REGW(0x3ea830, 0x0000000000000000UL);
/* Init_15: */
- REGW(0x3ea838, 0x0000000000000000);
+ REGW(0x3ea838, 0x0000000000000000UL);
/* Init_16: */
- REGW(0x3ea840, 0x0000000000000000);
+ REGW(0x3ea840, 0x0000000000000000UL);
/* Init_17: */
- REGW(0x3ea878, 0x0300000000000000);
+ REGW(0x3ea878, 0x0300000000000000UL);
/* Init_18: PHB0 mux select (Rx/Tx) */
- REGW(0x000F80, 0x0000000000000000);
+ REGW(0x000F80, 0x0000000000000000UL);
/* Init_19: PHB1 mux select (Rx/Tx) */
- REGW(0x010F80, 0x0000000000000000);
+ REGW(0x010F80, 0x0000000000000000UL);
/* Init_19.0: PHB2 mux select (Rx/Tx) */
- REGW(0x020F80, 0x0000000000000000);
+ REGW(0x020F80, 0x0000000000000000UL);
/* Init_19.1: PHB3 mux select (Rx/Tx) */
- REGW(0x030F80, 0x0000000000000000);
+ REGW(0x030F80, 0x0000000000000000UL);
/* Init_19.2: PHB4 mux select (Rx/Tx) */
- REGW(0x040F80, 0x0000000000000000);
+ REGW(0x040F80, 0x0000000000000000UL);
/* Init_19.3: PHB5 mux select (Rx/Tx) */
- REGW(0x050F80, 0x0000000000000000);
+ REGW(0x050F80, 0x0000000000000000UL);
/* Init_20: */
- REGW(0x3ea880, 0x40008000FF7F0000);
+ REGW(0x3ea880, 0x40008000FF7F0000UL);
/* Init_21: */
- REGW(0x3ea888, 0x0000000000000000);
+ REGW(0x3ea888, 0x0000000000000000UL);
/* Init_22: */
- REGW(0x3ea890, 0x0000000000000000);
+ REGW(0x3ea890, 0x0000000000000000UL);
/* Init_23: */
- REGW(0x3ea898, 0x0000000000000000);
+ REGW(0x3ea898, 0x0000000000000000UL);
/* Init_24: */
- REGW(0x3ea8a0, 0x8000000000000000);
+ REGW(0x3ea8a0, 0x8000000000000000UL);
/* Init_25: */
- REGW(0x3ea8a8, 0x0000000000000000);
+ REGW(0x3ea8a8, 0x0000000000000000UL);
/* Init_26: */
- REGW(0x3ea8b0, 0x8000000000000000);
+ REGW(0x3ea8b0, 0x8000000000000000UL);
/* Init_27: */
- REGW(0x3ea8b8, 0x0000000000000000);
+ REGW(0x3ea8b8, 0x0000000000000000UL);
/* Init_28: */
- REGW(0x3ea8c0, 0x0000000000000000);
+ REGW(0x3ea8c0, 0x0000000000000000UL);
/* Init_29: */
- REGW(0x3ea900, 0x40008000FF7F0000);
+ REGW(0x3ea900, 0x40008000FF7F0000UL);
/* Init_30: */
- REGW(0x3ea908, 0x0000000000000000);
+ REGW(0x3ea908, 0x0000000000000000UL);
/* Init_31: */
- REGW(0x3ea910, 0x0000000000000000);
+ REGW(0x3ea910, 0x0000000000000000UL);
/* Init_32: */
- REGW(0x3ea918, 0x0000000000000000);
+ REGW(0x3ea918, 0x0000000000000000UL);
/* Init_33: */
- REGW(0x3ea920, 0x8000000000000000);
+ REGW(0x3ea920, 0x8000000000000000UL);
/* Init_34: */
- REGW(0x3ea928, 0x0000000000000000);
+ REGW(0x3ea928, 0x0000000000000000UL);
/* Init_35: */
- REGW(0x3ea930, 0x8000000000000000);
+ REGW(0x3ea930, 0x8000000000000000UL);
/* Init_36: */
- REGW(0x3ea938, 0x0000000000000000);
+ REGW(0x3ea938, 0x0000000000000000UL);
/* Init_37: */
- REGW(0x3ea940, 0x0000000000000000);
+ REGW(0x3ea940, 0x0000000000000000UL);
/* Init_38: */
- REGW(0x3ea980, 0x40008000FF7F0000);
+ REGW(0x3ea980, 0x40008000FF7F0000UL);
/* Init_39: */
- REGW(0x3ea988, 0x0000000000000000);
+ REGW(0x3ea988, 0x0000000000000000UL);
/* Init_40: */
- REGW(0x3ea990, 0x0000000000000000);
+ REGW(0x3ea990, 0x0000000000000000UL);
/* Init_41: */
- REGW(0x3ea998, 0x0000000000000000);
+ REGW(0x3ea998, 0x0000000000000000UL);
/* Init_42: */
- REGW(0x3ea9a0, 0x8000000000000000);
+ REGW(0x3ea9a0, 0x8000000000000000UL);
/* Init_43: */
- REGW(0x3ea9a8, 0x0000000000000000);
+ REGW(0x3ea9a8, 0x0000000000000000UL);
/* Init_44: */
- REGW(0x3ea9b0, 0x8000000000000000);
+ REGW(0x3ea9b0, 0x8000000000000000UL);
/* Init_45: */
- REGW(0x3ea9b8, 0x0000000000000000);
+ REGW(0x3ea9b8, 0x0000000000000000UL);
/* Init_46: */
- REGW(0x3ea9c0, 0x0000000000000000);
+ REGW(0x3ea9c0, 0x0000000000000000UL);
/* Init_47: */
- REGW(0x3eaa00, 0x40008000FF7F0000);
+ REGW(0x3eaa00, 0x40008000FF7F0000UL);
/* Init_48: */
- REGW(0x3eaa08, 0x0000000000000000);
+ REGW(0x3eaa08, 0x0000000000000000UL);
/* Init_49: */
- REGW(0x3eaa10, 0x0000000000000000);
+ REGW(0x3eaa10, 0x0000000000000000UL);
/* Init_50: */
- REGW(0x3eaa18, 0x0000000000000000);
+ REGW(0x3eaa18, 0x0000000000000000UL);
/* Init_51: */
- REGW(0x3eaa20, 0x8000000000000000);
+ REGW(0x3eaa20, 0x8000000000000000UL);
/* Init_52: */
- REGW(0x3eaa28, 0x0000000000000000);
+ REGW(0x3eaa28, 0x0000000000000000UL);
/* Init_53: */
- REGW(0x3eaa30, 0x8000000000000000);
+ REGW(0x3eaa30, 0x8000000000000000UL);
/* Init_54: */
- REGW(0x3eaa38, 0x0000000000000000);
+ REGW(0x3eaa38, 0x0000000000000000UL);
/* Init_55: */
- REGW(0x3eaa40, 0x0000000000000000);
+ REGW(0x3eaa40, 0x0000000000000000UL);
/* Init_56: */
- REGW(0x3ea810, 0x1000000000000000);
+ REGW(0x3ea810, 0x1000000000000000UL);
/* Init_57: */
- REGW(0x3ea810, 0x8000000000000000);
+ REGW(0x3ea810, 0x8000000000000000UL);
/*** I2C Master init fixup */
/* Init_58: I2C Master Operation Control */
- REGW(0x3eb0a8, 0x8100000000000000);
+ REGW(0x3eb0a8, 0x8100000000000000UL);
}
static void p7ioc_init_GEM(struct p7ioc *ioc)
@@ -1055,7 +1055,7 @@ static void p7ioc_init_GEM(struct p7ioc *ioc)
/* Init_4: GEM XFIR */
REGW(0x3e0008, 0x0000000000000000);
/* Init_5: GEM Mask (See FIXME) */
- REGW(0x3e0020, 0x000F033FFFFFFFFF);
+ REGW(0x3e0020, 0x000F033FFFFFFFFFUL);
/* Init_6: GEM WOF */
REGW(0x3e0028, 0x0000000000000000);
}
diff --git a/hw/p7ioc-phb.c b/hw/p7ioc-phb.c
index 5167832..97e4885 100644
--- a/hw/p7ioc-phb.c
+++ b/hw/p7ioc-phb.c
@@ -26,9 +26,9 @@
#include <opal.h>
#include <ccan/str/str.h>
-#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB%d: " fmt, \
+#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB#%04x: " fmt, \
(p)->phb.opal_id, ## a)
-#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB%d: " fmt, \
+#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB#%04x: " fmt, \
(p)->phb.opal_id, ## a)
/* Helper to select an IODA table entry */
@@ -77,7 +77,7 @@ static bool p7ioc_phb_fenced(struct p7ioc_phb *p)
struct p7ioc *ioc = p->ioc;
uint64_t fence, fbits;
- fbits = 0x0003000000000000 >> (p->index * 4);
+ fbits = 0x0003000000000000UL >> (p->index * 4);
fence = in_be64(ioc->regs + P7IOC_CHIP_FENCE_SHADOW);
return (fence & fbits) != 0;
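The fence-shadow mask computed above allocates two adjacent bits per PHB, stepping down one nibble per index. A small hypothetical trace of the values p->index produces:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int index;

	/* Same expression as p7ioc_phb_fenced(), for PHB indices 0..5. */
	for (index = 0; index < 6; index++)
		printf("PHB%d fbits = %#018llx\n", index,
		       (unsigned long long)(0x0003000000000000ULL >> (index * 4)));
	return 0;
}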
@@ -241,7 +241,7 @@ static int64_t p7ioc_sm_freset(struct p7ioc_phb *p)
}
/* Mask PCIE port interrupts and AER receiver error */
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000UL);
p7ioc_pcicfg_read32(&p->phb, 0,
p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
cfg32 |= PCIECAP_AER_CE_RECVR_ERR;
@@ -311,9 +311,9 @@ static int64_t p7ioc_sm_freset(struct p7ioc_phb *p)
* interrupts
*/
out_be64(p->regs + UTL_PCIE_PORT_STATUS,
- 0x00E0000000000000);
+ 0x00E0000000000000UL);
out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,
- 0xFE65000000000000);
+ 0xFE65000000000000UL);
/* Clear AER receiver error status */
p7ioc_pcicfg_write32(&p->phb, 0,
@@ -448,7 +448,7 @@ static int64_t p7ioc_sm_slot_power_off(struct p7ioc_phb *p)
 	 * PHB slot. Otherwise, it won't take effect. That's
 	 * similar to what we did for power-on.
*/
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000UL);
reg = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
reg &= ~(0x8c00000000000000ul);
reg |= 0x8400000000000000ul;
@@ -530,7 +530,7 @@ static int64_t p7ioc_sm_slot_power_on(struct p7ioc_phb *p)
/* Adjust UTL interrupt settings to disable various
* errors that would interfere with the process
*/
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000UL);
/* If the power is not on, turn it on now */
if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
@@ -699,7 +699,7 @@ static int64_t p7ioc_sm_hot_reset(struct p7ioc_phb *p)
}
/* Mask PCIE port interrupts and AER receiver error */
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000UL);
p7ioc_pcicfg_read32(&p->phb, 0,
p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
cfg32 |= PCIECAP_AER_CE_RECVR_ERR;
@@ -760,8 +760,8 @@ static int64_t p7ioc_sm_hot_reset(struct p7ioc_phb *p)
* Clear spurious errors and enable PCIE port
* interrupts
*/
- out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00E0000000000000);
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xFE65000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00E0000000000000UL);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xFE65000000000000UL);
/* Clear AER receiver error status */
p7ioc_pcicfg_write32(&p->phb, 0,
@@ -1872,7 +1872,7 @@ static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint16_t pe_number,
case 0x1000000: /* 16M */
tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 13ul);
break;
- case 0x400000000: /* 16G */
+ case 0x400000000UL: /* 16G */
tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 23ul);
break;
default:
@@ -2344,7 +2344,7 @@ static void p7ioc_phb_init_ioda_cache(struct p7ioc_phb *p)
* last entry is to encompass all RIDs.
*/
for (i = 0; i < 127; i++)
- p->peltm_cache[i] = 0x0001f80000000000;
+ p->peltm_cache[i] = 0x0001f80000000000UL;
p->peltm_cache[127] = 0x0ul;
for (i = 0; i < 128; i++) {
@@ -2929,7 +2929,7 @@ void p7ioc_phb_setup(struct p7ioc *ioc, uint8_t index)
/* We register the PHB before we initialize it so we
* get a useful OPAL ID for it
*/
- pci_register_phb(&p->phb);
+ pci_register_phb(&p->phb, OPAL_DYNAMIC_PHB_ID);
/* Platform additional setup */
if (platform.pci_setup_phb)
@@ -3114,83 +3114,83 @@ static void p7ioc_phb_init_utl(struct p7ioc_phb *p)
/* Init_82..84: Clear spurious errors and assign errors to the
* right "interrupt" signal
*/
- out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, 0xffffffffffffffff);
- out_be64(p->regs + UTL_SYS_BUS_AGENT_ERR_SEVERITY, 0x0000000000000000);
- out_be64(p->regs + UTL_SYS_BUS_AGENT_IRQ_EN, 0xac80000000000000);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, 0xffffffffffffffffUL);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_ERR_SEVERITY, 0x0000000000000000UL);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_IRQ_EN, 0xac80000000000000UL);
/* Init_85..89: Setup buffer allocations */
- out_be64(p->regs + UTL_OUT_POST_DAT_BUF_ALLOC, 0x0400000000000000);
- out_be64(p->regs + UTL_IN_POST_HDR_BUF_ALLOC, 0x1000000000000000);
- out_be64(p->regs + UTL_IN_POST_DAT_BUF_ALLOC, 0x4000000000000000);
- out_be64(p->regs + UTL_PCIE_TAGS_ALLOC, 0x0800000000000000);
- out_be64(p->regs + UTL_GBIF_READ_TAGS_ALLOC, 0x0800000000000000);
+ out_be64(p->regs + UTL_OUT_POST_DAT_BUF_ALLOC, 0x0400000000000000UL);
+ out_be64(p->regs + UTL_IN_POST_HDR_BUF_ALLOC, 0x1000000000000000UL);
+ out_be64(p->regs + UTL_IN_POST_DAT_BUF_ALLOC, 0x4000000000000000UL);
+ out_be64(p->regs + UTL_PCIE_TAGS_ALLOC, 0x0800000000000000UL);
+ out_be64(p->regs + UTL_GBIF_READ_TAGS_ALLOC, 0x0800000000000000UL);
/* Init_90: PCI Express port control */
- out_be64(p->regs + UTL_PCIE_PORT_CONTROL, 0x8480000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_CONTROL, 0x8480000000000000UL);
/* Init_91..93: Clean & setup port errors */
- out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xff7fffffffffffff);
- out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV, 0x00e0000000000000);
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e65000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xff7fffffffffffffUL);
+ out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV, 0x00e0000000000000UL);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e65000000000000UL);
/* Init_94 : Cleanup RC errors */
- out_be64(p->regs + UTL_RC_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + UTL_RC_STATUS, 0xffffffffffffffffUL);
}
static void p7ioc_phb_init_errors(struct p7ioc_phb *p)
{
/* Init_98: LEM Error Mask : Temporarily disable error interrupts */
- out_be64(p->regs + PHB_LEM_ERROR_MASK, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0xffffffffffffffffUL);
/* Init_99..107: Configure main error traps & clear old state */
- out_be64(p->regs + PHB_ERR_STATUS, 0xffffffffffffffff);
- out_be64(p->regs + PHB_ERR1_STATUS, 0x0000000000000000);
- out_be64(p->regs + PHB_ERR_LEM_ENABLE, 0xffffffffefffffff);
- out_be64(p->regs + PHB_ERR_FREEZE_ENABLE, 0x0000000061c00000);
- out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE, 0xffffffc58c000000);
- out_be64(p->regs + PHB_ERR_LOG_0, 0x0000000000000000);
- out_be64(p->regs + PHB_ERR_LOG_1, 0x0000000000000000);
- out_be64(p->regs + PHB_ERR_STATUS_MASK, 0x0000000000000000);
- out_be64(p->regs + PHB_ERR1_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_STATUS, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_ERR1_STATUS, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_ERR_LEM_ENABLE, 0xffffffffefffffffUL);
+ out_be64(p->regs + PHB_ERR_FREEZE_ENABLE, 0x0000000061c00000UL);
+ out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE, 0xffffffc58c000000UL);
+ out_be64(p->regs + PHB_ERR_LOG_0, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_ERR_LOG_1, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_ERR_STATUS_MASK, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_ERR1_STATUS_MASK, 0x0000000000000000UL);
/* Init_108_116: Configure MMIO error traps & clear old state */
- out_be64(p->regs + PHB_OUT_ERR_STATUS, 0xffffffffffffffff);
- out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0x0000000000000000);
- out_be64(p->regs + PHB_OUT_ERR_LEM_ENABLE, 0xffffffffffffffff);
- out_be64(p->regs + PHB_OUT_ERR_FREEZE_ENABLE, 0x0000430803000000);
- out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE, 0x9df3bc00f0f0700f);
- out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0x0000000000000000);
- out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0x0000000000000000);
- out_be64(p->regs + PHB_OUT_ERR_STATUS_MASK, 0x0000000000000000);
- out_be64(p->regs + PHB_OUT_ERR1_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_OUT_ERR_LEM_ENABLE, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_OUT_ERR_FREEZE_ENABLE, 0x0000430803000000UL);
+ out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE, 0x9df3bc00f0f0700fUL);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS_MASK, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS_MASK, 0x0000000000000000UL);
/* Init_117_125: Configure DMA_A error traps & clear old state */
- out_be64(p->regs + PHB_INA_ERR_STATUS, 0xffffffffffffffff);
- out_be64(p->regs + PHB_INA_ERR1_STATUS, 0x0000000000000000);
- out_be64(p->regs + PHB_INA_ERR_LEM_ENABLE, 0xffffffffffffffff);
- out_be64(p->regs + PHB_INA_ERR_FREEZE_ENABLE, 0xc00003ff01006000);
- out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE, 0x3fff50007e559fd8);
- out_be64(p->regs + PHB_INA_ERR_LOG_0, 0x0000000000000000);
- out_be64(p->regs + PHB_INA_ERR_LOG_1, 0x0000000000000000);
- out_be64(p->regs + PHB_INA_ERR_STATUS_MASK, 0x0000000000000000);
- out_be64(p->regs + PHB_INA_ERR1_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_STATUS, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INA_ERR_LEM_ENABLE, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_INA_ERR_FREEZE_ENABLE, 0xc00003ff01006000UL);
+ out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE, 0x3fff50007e559fd8UL);
+ out_be64(p->regs + PHB_INA_ERR_LOG_0, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INA_ERR_LOG_1, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INA_ERR_STATUS_MASK, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS_MASK, 0x0000000000000000UL);
/* Init_126_134: Configure DMA_B error traps & clear old state */
- out_be64(p->regs + PHB_INB_ERR_STATUS, 0xffffffffffffffff);
- out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0000000000000000);
- out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE, 0xffffffffffffffff);
- out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE, 0x0000000000000000);
- out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0x18ff80ffff7f0000);
- out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0000000000000000);
- out_be64(p->regs + PHB_INB_ERR_LOG_1, 0x0000000000000000);
- out_be64(p->regs + PHB_INB_ERR_STATUS_MASK, 0x0000000000000000);
- out_be64(p->regs + PHB_INB_ERR1_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_STATUS, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0x18ff80ffff7f0000UL);
+ out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INB_ERR_LOG_1, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INB_ERR_STATUS_MASK, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS_MASK, 0x0000000000000000UL);
/* Init_135..138: Cleanup & configure LEM */
- out_be64(p->regs + PHB_LEM_FIR_ACCUM, 0x0000000000000000);
- out_be64(p->regs + PHB_LEM_ACTION0, 0xffffffffffffffff);
- out_be64(p->regs + PHB_LEM_ACTION1, 0x0000000000000000);
- out_be64(p->regs + PHB_LEM_WOF, 0x0000000000000000);
+ out_be64(p->regs + PHB_LEM_FIR_ACCUM, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_LEM_ACTION0, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_LEM_ACTION1, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_LEM_WOF, 0x0000000000000000UL);
}
/* p7ioc_phb_init - Initialize the PHB hardware
@@ -3223,7 +3223,7 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
* Interrupt Request=1, TCE Read=3.
*/
/* Init_1: AIB TX Channel Mapping */
- out_be64(p->regs_asb + PHB_AIB_TX_CHAN_MAPPING, 0x0211300000000000);
+ out_be64(p->regs_asb + PHB_AIB_TX_CHAN_MAPPING, 0x0211300000000000UL);
/*
* This group of steps initializes the AIB RX credits for
@@ -3241,11 +3241,11 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
*/
/* Init_2: AIB RX Command Credit */
- out_be64(p->regs_asb + PHB_AIB_RX_CMD_CRED, 0x0020002000200001);
+ out_be64(p->regs_asb + PHB_AIB_RX_CMD_CRED, 0x0020002000200001UL);
/* Init_3: AIB RX Data Credit */
- out_be64(p->regs_asb + PHB_AIB_RX_DATA_CRED, 0x0000002000000001);
+ out_be64(p->regs_asb + PHB_AIB_RX_DATA_CRED, 0x0000002000000001UL);
 	/* Init_4: AIB RX Credit Init Timer */
- out_be64(p->regs_asb + PHB_AIB_RX_CRED_INIT_TIMER, 0xFF00000000000000);
+ out_be64(p->regs_asb + PHB_AIB_RX_CRED_INIT_TIMER, 0xFF00000000000000UL);
/*
* Enable all 32 AIB and TCE tags.
@@ -3256,9 +3256,9 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
*/
/* Init_5: PHB - AIB Tag Enable Register */
- out_be64(p->regs_asb + PHB_AIB_TAG_ENABLE, 0xFFFFFFFF00000000);
+ out_be64(p->regs_asb + PHB_AIB_TAG_ENABLE, 0xFFFFFFFF00000000UL);
 	/* Init_6: PHB - TCE Tag Enable Register */
- out_be64(p->regs_asb + PHB_TCE_TAG_ENABLE, 0xFFFFFFFF00000000);
+ out_be64(p->regs_asb + PHB_TCE_TAG_ENABLE, 0xFFFFFFFF00000000UL);
/* Init_7: PCIE - System Configuration Register
*
@@ -3275,7 +3275,7 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
* reduced to the allowed ranges from 128B
* to 2KB if needed.
*/
- out_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG, 0x422800FC20000000);
+ out_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG, 0x422800FC20000000UL);
/* Init_8: PHB - PCI-E Reset Register
*
@@ -3291,7 +3291,7 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
* NOTE: We perform a PERST at the end of the init sequence so
* we could probably skip that link training.
*/
- out_be64(p->regs + PHB_RESET, 0xE800000000000000);
+ out_be64(p->regs + PHB_RESET, 0xE800000000000000UL);
/* Init_9: BUID
*
@@ -3331,19 +3331,19 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
p7ioc_ioda_reset(&p->phb, false);
/* Init_42..47: Clear UTL & DLP error log regs */
- out_be64(p->regs + PHB_PCIE_UTL_ERRLOG1, 0xffffffffffffffff);
- out_be64(p->regs + PHB_PCIE_UTL_ERRLOG2, 0xffffffffffffffff);
- out_be64(p->regs + PHB_PCIE_UTL_ERRLOG3, 0xffffffffffffffff);
- out_be64(p->regs + PHB_PCIE_UTL_ERRLOG4, 0xffffffffffffffff);
- out_be64(p->regs + PHB_PCIE_DLP_ERRLOG1, 0xffffffffffffffff);
- out_be64(p->regs + PHB_PCIE_DLP_ERRLOG2, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG1, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG2, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG3, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG4, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG1, 0xffffffffffffffffUL);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG2, 0xffffffffffffffffUL);
/* Init_48: Wait for DLP core to be out of reset */
if (!p7ioc_phb_wait_dlp_reset(p))
goto failed;
/* Init_49 - Clear port status */
- out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffffffffffffffffUL);
/* Init_50..81: Init root complex config space */
if (!p7ioc_phb_init_rc_cfg(p))
@@ -3353,7 +3353,7 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
p7ioc_phb_init_utl(p);
/* Init_95: PCI-E Reset, deassert reset for internal error macros */
- out_be64(p->regs + PHB_RESET, 0xe000000000000000);
+ out_be64(p->regs + PHB_RESET, 0xe000000000000000UL);
/* Init_96: PHB Control register. Various PHB settings:
*
@@ -3361,7 +3361,7 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
* - Enable all TCAM entries
* - Set failed DMA read requests to return Completer Abort on error
*/
- out_be64(p->regs + PHB_CONTROL, 0x7f38000000000000);
+ out_be64(p->regs + PHB_CONTROL, 0x7f38000000000000UL);
/* Init_97: Legacy Control register
*
@@ -3406,18 +3406,18 @@ int64_t p7ioc_phb_init(struct p7ioc_phb *p)
*/
/* Init_145..149: Enable error interrupts and LEM */
- out_be64(p->regs + PHB_ERR_IRQ_ENABLE, 0x0000000061c00000);
- out_be64(p->regs + PHB_OUT_ERR_IRQ_ENABLE, 0x0000430803000000);
- out_be64(p->regs + PHB_INA_ERR_IRQ_ENABLE, 0xc00003ff01006000);
- out_be64(p->regs + PHB_INB_ERR_IRQ_ENABLE, 0x0000000000000000);
- out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x1249a1147f500f2c);
+ out_be64(p->regs + PHB_ERR_IRQ_ENABLE, 0x0000000061c00000UL);
+ out_be64(p->regs + PHB_OUT_ERR_IRQ_ENABLE, 0x0000430803000000UL);
+ out_be64(p->regs + PHB_INA_ERR_IRQ_ENABLE, 0xc00003ff01006000UL);
+ out_be64(p->regs + PHB_INB_ERR_IRQ_ENABLE, 0x0000000000000000UL);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x1249a1147f500f2cUL);
/* Init_150: Enable DMA read/write TLP address speculation */
- out_be64(p->regs + PHB_TCE_PREFETCH, 0x0000c00000000000);
+ out_be64(p->regs + PHB_TCE_PREFETCH, 0x0000c00000000000UL);
/* Init_151..152: Set various timeouts */
- out_be64(p->regs + PHB_TIMEOUT_CTRL1, 0x1611112010200000);
- out_be64(p->regs + PHB_TIMEOUT_CTRL2, 0x0000561300000000);
+ out_be64(p->regs + PHB_TIMEOUT_CTRL1, 0x1611112010200000UL);
+ out_be64(p->regs + PHB_TIMEOUT_CTRL2, 0x0000561300000000UL);
/* Mark the PHB as functional which enables all the various sequences */
p->state = P7IOC_PHB_STATE_FUNCTIONAL;
diff --git a/hw/p7ioc.c b/hw/p7ioc.c
index c1a4514..85a0a51 100644
--- a/hw/p7ioc.c
+++ b/hw/p7ioc.c
@@ -186,7 +186,6 @@ static int64_t p7ioc_get_diag_data(struct io_hub *hub,
}
static const struct io_hub_ops p7ioc_hub_ops = {
- .set_tce_mem = NULL, /* No set_tce_mem for p7ioc, we use FMTC */
.get_diag_data = p7ioc_get_diag_data,
.reset = p7ioc_reset,
};
@@ -235,7 +234,7 @@ static int64_t p7ioc_rgc_set_xive(void *data, uint32_t isn,
}
/* Update the XIVE. Don't care HRT entry on P7IOC */
- out_be64(ioc->regs + 0x3e1820, (0x0002000000000000 | irq));
+ out_be64(ioc->regs + 0x3e1820, (0x0002000000000000UL | irq));
xive = in_be64(ioc->regs + 0x3e1830);
xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
@@ -276,26 +275,26 @@ static bool p7ioc_err_bit(struct p7ioc *ioc, uint64_t wof)
/* EI won't create interrupt yet */
break;
case P7IOC_ERR_SRC_RGC:
- severity[P7IOC_ERR_CLASS_GXE] = 0xF00086E0F4FCFFFF;
- severity[P7IOC_ERR_CLASS_RGA] = 0x0000010000000000;
- severity[P7IOC_ERR_CLASS_INF] = 0x0FFF781F0B030000;
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF00086E0F4FCFFFFUL;
+ severity[P7IOC_ERR_CLASS_RGA] = 0x0000010000000000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0FFF781F0B030000UL;
break;
case P7IOC_ERR_SRC_BI_UP:
- severity[P7IOC_ERR_CLASS_GXE] = 0xF7FFFFFF7FFFFFFF;
- severity[P7IOC_ERR_CLASS_INF] = 0x0800000080000000;
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF7FFFFFF7FFFFFFFUL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0800000080000000UL;
break;
case P7IOC_ERR_SRC_BI_DOWN:
- severity[P7IOC_ERR_CLASS_GXE] = 0xDFFFF7F35F8000BF;
- severity[P7IOC_ERR_CLASS_INF] = 0x2000080CA07FFF40;
+ severity[P7IOC_ERR_CLASS_GXE] = 0xDFFFF7F35F8000BFUL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x2000080CA07FFF40UL;
break;
case P7IOC_ERR_SRC_CI_P0:
- severity[P7IOC_ERR_CLASS_GXE] = 0xF5FF000000000000;
- severity[P7IOC_ERR_CLASS_INF] = 0x0200FFFFFFFFFFFF;
- severity[P7IOC_ERR_CLASS_MAL] = 0x0800000000000000;
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF5FF000000000000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0200FFFFFFFFFFFFUL;
+ severity[P7IOC_ERR_CLASS_MAL] = 0x0800000000000000UL;
break;
case P7IOC_ERR_SRC_CI_P1:
- severity[P7IOC_ERR_CLASS_GXE] = 0xFFFF000000000000;
- severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFF;
+ severity[P7IOC_ERR_CLASS_GXE] = 0xFFFF000000000000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFFUL;
break;
case P7IOC_ERR_SRC_CI_P2:
case P7IOC_ERR_SRC_CI_P3:
@@ -303,19 +302,19 @@ static bool p7ioc_err_bit(struct p7ioc *ioc, uint64_t wof)
case P7IOC_ERR_SRC_CI_P5:
case P7IOC_ERR_SRC_CI_P6:
case P7IOC_ERR_SRC_CI_P7:
- severity[P7IOC_ERR_CLASS_GXE] = 0x5B0B000000000000;
- severity[P7IOC_ERR_CLASS_PHB] = 0xA4F4000000000000;
- severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFF;
+ severity[P7IOC_ERR_CLASS_GXE] = 0x5B0B000000000000UL;
+ severity[P7IOC_ERR_CLASS_PHB] = 0xA4F4000000000000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFFUL;
break;
case P7IOC_ERR_SRC_MISC:
- severity[P7IOC_ERR_CLASS_GXE] = 0x0000000310000000;
- severity[P7IOC_ERR_CLASS_PLL] = 0x0000000001C00000;
- severity[P7IOC_ERR_CLASS_INF] = 0x555FFFF0EE3FFFFF;
- severity[P7IOC_ERR_CLASS_MAL] = 0xAAA0000C00000000;
+ severity[P7IOC_ERR_CLASS_GXE] = 0x0000000310000000UL;
+ severity[P7IOC_ERR_CLASS_PLL] = 0x0000000001C00000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x555FFFF0EE3FFFFFUL;
+ severity[P7IOC_ERR_CLASS_MAL] = 0xAAA0000C00000000UL;
break;
case P7IOC_ERR_SRC_I2C:
- severity[P7IOC_ERR_CLASS_GXE] = 0x1100000000000000;
- severity[P7IOC_ERR_CLASS_INF] = 0xEEFFFFFFFFFFFFFF;
+ severity[P7IOC_ERR_CLASS_GXE] = 0x1100000000000000UL;
+ severity[P7IOC_ERR_CLASS_INF] = 0xEEFFFFFFFFFFFFFFUL;
break;
case P7IOC_ERR_SRC_PHB0:
case P7IOC_ERR_SRC_PHB1:
@@ -323,9 +322,9 @@ static bool p7ioc_err_bit(struct p7ioc *ioc, uint64_t wof)
case P7IOC_ERR_SRC_PHB3:
case P7IOC_ERR_SRC_PHB4:
case P7IOC_ERR_SRC_PHB5:
- severity[P7IOC_ERR_CLASS_PHB] = 0xADB650CB808DD051;
- severity[P7IOC_ERR_CLASS_ER] = 0x0000A0147F50092C;
- severity[P7IOC_ERR_CLASS_INF] = 0x52490F2000222682;
+ severity[P7IOC_ERR_CLASS_PHB] = 0xADB650CB808DD051UL;
+ severity[P7IOC_ERR_CLASS_ER] = 0x0000A0147F50092CUL;
+ severity[P7IOC_ERR_CLASS_INF] = 0x52490F2000222682UL;
break;
}
@@ -402,7 +401,7 @@ bool p7ioc_check_LEM(struct p7ioc *ioc,
/* IOC would be broken upon broken FIR */
fir = in_be64(base + P7IOC_LEM_FIR_OFFSET);
- if (fir == 0xffffffffffffffff) {
+ if (fir == 0xffffffffffffffffUL) {
ioc->err.err_src = P7IOC_ERR_SRC_NONE;
ioc->err.err_class = P7IOC_ERR_CLASS_GXE;
goto err;
@@ -509,7 +508,7 @@ static bool p7ioc_check_GEM(struct p7ioc *ioc)
* Recov_6: go to GXE recovery?
*/
xfir = in_be64(ioc->regs + P7IOC_GEM_XFIR);
- if (xfir == 0xffffffffffffffff) {
+ if (xfir == 0xffffffffffffffffUL) {
ioc->err.err_src = P7IOC_ERR_SRC_NONE;
ioc->err.err_class = P7IOC_ERR_CLASS_GXE;
p7ioc_set_err_pending(ioc, true);
diff --git a/hw/p8-i2c.c b/hw/p8-i2c.c
index 848d400..66510f1 100644
--- a/hw/p8-i2c.c
+++ b/hw/p8-i2c.c
@@ -1036,6 +1036,7 @@ static void p8_i2c_free_request(struct i2c_request *req)
static inline uint32_t p8_i2c_get_bit_rate_divisor(uint32_t lb_freq,
uint32_t bus_speed)
{
+ assert(bus_speed > 0);
return (((lb_freq / bus_speed) - 1) / 4);
}
@@ -1043,6 +1044,8 @@ static inline uint64_t p8_i2c_get_poll_interval(uint32_t bus_speed)
{
uint64_t usec;
+ assert(bus_speed > 0);
+
/* Polling Interval = 8 * (1/bus_speed) * (1/10) -> convert to uSec */
usec = ((8 * USEC_PER_SEC) / (10 * bus_speed));
return usecs_to_tb(usec);
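The new assertions guard against a zero bus_speed reaching these divisions; the formulas themselves are unchanged. A standalone sketch of the divisor computation with hypothetical frequencies (a 50 MHz local bus driving a 400 kHz I2C bus):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula as p8_i2c_get_bit_rate_divisor() above. */
static uint32_t bit_rate_divisor(uint32_t lb_freq, uint32_t bus_speed)
{
	assert(bus_speed > 0);
	return ((lb_freq / bus_speed) - 1) / 4;
}

int main(void)
{
	/* (50000000 / 400000 - 1) / 4 = (125 - 1) / 4 = 31 */
	printf("divisor = %u\n", (unsigned)bit_rate_divisor(50000000, 400000));
	return 0;
}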
diff --git a/hw/phb3.c b/hw/phb3.c
index f20f5f8..adff5bc 100644
--- a/hw/phb3.c
+++ b/hw/phb3.c
@@ -53,11 +53,11 @@
static void phb3_init_hw(struct phb3 *p, bool first_init);
-#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB%d: " fmt, \
+#define PHBDBG(p, fmt, a...) prlog(PR_DEBUG, "PHB#%04x: " fmt, \
(p)->phb.opal_id, ## a)
-#define PHBINF(p, fmt, a...) prlog(PR_INFO, "PHB%d: " fmt, \
+#define PHBINF(p, fmt, a...) prlog(PR_INFO, "PHB#%04x: " fmt, \
(p)->phb.opal_id, ## a)
-#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB%d: " fmt, \
+#define PHBERR(p, fmt, a...) prlog(PR_ERR, "PHB#%04x: " fmt, \
(p)->phb.opal_id, ## a)
/*
@@ -116,6 +116,36 @@ static bool phb3_fenced(struct phb3 *p)
return false;
}
+static void phb3_pcicfg_filter_rc_pref_window(struct pci_device *pd __unused,
+ struct pci_cfg_reg_filter *pcrf,
+ uint32_t offset, uint32_t len,
+ uint32_t *data, bool write)
+{
+ uint8_t *pdata;
+ uint32_t i;
+
+ /* Cache whatever we received */
+ if (write) {
+ pdata = &pcrf->data[offset - pcrf->start];
+ for (i = 0; i < len; i++, pdata++)
+ *pdata = (uint8_t)(*data >> (8 * i));
+ return;
+ }
+
+ /* Return whatever we cached */
+ *data = 0;
+ pdata = &pcrf->data[offset - pcrf->start + len - 1];
+ for (i = len; i > 0; i--, pdata--) {
+ *data = (*data) << 8;
+ if (offset + i == PCI_CFG_PREF_MEM_BASE) {
+ *data |= ((*pdata & 0xf0) | 0x1);
+ continue;
+ }
+
+ *data |= *pdata;
+ }
+}
+
/*
* Configuration space access
*
@@ -149,6 +179,33 @@ static int64_t phb3_pcicfg_check(struct phb3 *p, uint32_t bdfn,
return OPAL_SUCCESS;
}
+static void phb3_pcicfg_filter(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t len,
+ uint32_t *data, bool write)
+{
+ struct pci_device *pd;
+ struct pci_cfg_reg_filter *pcrf;
+ uint32_t flags;
+
+	/* FIXME: It hurts performance to search a PCI device
+	 * that doesn't have any filters at all. So it would be
+	 * worthwhile to maintain a table in the PHB indicating
+	 * which PCI devices have filters. However, bitmaps don't
+	 * seem to be supported by skiboot yet; implement this
+	 * once bitmap support is available.
+	 */
+ pd = pci_find_dev(phb, bdfn);
+ pcrf = pd ? pci_find_cfg_reg_filter(pd, offset, len) : NULL;
+ if (!pcrf || !pcrf->func)
+ return;
+
+ flags = write ? PCI_REG_FLAG_WRITE : PCI_REG_FLAG_READ;
+ if ((pcrf->flags & flags) != flags)
+ return;
+
+ pcrf->func(pd, pcrf, offset, len, data, write);
+}
+
#define PHB3_PCI_CFG_READ(size, type) \
static int64_t phb3_pcicfg_read##size(struct phb *phb, uint32_t bdfn, \
uint32_t offset, type *data) \
@@ -189,6 +246,9 @@ static int64_t phb3_pcicfg_read##size(struct phb *phb, uint32_t bdfn, \
(offset & (4 - sizeof(type)))); \
} \
\
+ phb3_pcicfg_filter(phb, bdfn, offset, sizeof(type), \
+ (uint32_t *)data, false); \
+ \
return OPAL_SUCCESS; \
}
@@ -214,6 +274,9 @@ static int64_t phb3_pcicfg_write##size(struct phb *phb, uint32_t bdfn, \
return OPAL_HARDWARE; \
} \
\
+ phb3_pcicfg_filter(phb, bdfn, offset, sizeof(type), \
+ (uint32_t *)&data, true); \
+ \
addr = PHB_CA_ENABLE; \
addr = SETFIELD(PHB_CA_BDFN, addr, bdfn); \
addr = SETFIELD(PHB_CA_REG, addr, offset); \
@@ -424,30 +487,53 @@ static void phb3_endpoint_init(struct phb *phb,
static void phb3_check_device_quirks(struct phb *phb, struct pci_device *dev)
{
struct phb3 *p = phb_to_phb3(phb);
- u64 modectl;
u32 vdid;
u16 vendor, device;
- /* For these adapters, if they are directly under the PHB, we
- * adjust some settings for performances
- */
- xscom_read(p->chip_id, p->pe_xscom + 0x0b, &modectl);
+ if (dev->primary_bus != 0 &&
+ dev->primary_bus != 1)
+ return;
pci_cfg_read32(phb, dev->bdfn, 0, &vdid);
vendor = vdid & 0xffff;
device = vdid >> 16;
- if (vendor == 0x15b3 &&
- (device == 0x1003 || /* Travis3-EN (CX3) */
- device == 0x1011 || /* HydePark (ConnectIB) */
- device == 0x1013)) { /* GlacierPark (CX4) */
- /* Set disable_wr_scope_group bit */
- modectl |= PPC_BIT(14);
- } else {
- /* Clear disable_wr_scope_group bit */
- modectl &= ~PPC_BIT(14);
- }
- xscom_write(p->chip_id, p->pe_xscom + 0x0b, modectl);
+ if (dev->primary_bus == 1) {
+ u64 modectl;
+
+ /* For these adapters, if they are directly under the PHB, we
+	 * adjust some settings for performance
+ */
+ xscom_read(p->chip_id, p->pe_xscom + 0x0b, &modectl);
+ if (vendor == 0x15b3 &&
+ (device == 0x1003 || /* Travis3-EN (CX3) */
+ device == 0x1011 || /* HydePark (ConnectIB) */
+ device == 0x1013)) { /* GlacierPark (CX4) */
+ /* Set disable_wr_scope_group bit */
+ modectl |= PPC_BIT(14);
+ } else {
+ /* Clear disable_wr_scope_group bit */
+ modectl &= ~PPC_BIT(14);
+ }
+
+ xscom_write(p->chip_id, p->pe_xscom + 0x0b, modectl);
+ } else if (dev->primary_bus == 0) {
+ if (vendor == 0x1014 && device == 0x03dc) {
+ uint32_t pref_hi, tmp;
+
+ pci_cfg_read32(phb, dev->bdfn,
+ PCI_CFG_PREF_MEM_BASE_U32, &pref_hi);
+ pci_cfg_write32(phb, dev->bdfn,
+ PCI_CFG_PREF_MEM_BASE_U32, ~pref_hi);
+ pci_cfg_read32(phb, dev->bdfn,
+ PCI_CFG_PREF_MEM_BASE_U32, &tmp);
+ if (tmp == pref_hi)
+ pci_add_cfg_reg_filter(dev,
+ PCI_CFG_PREF_MEM_BASE_U32, 12,
+ PCI_REG_FLAG_READ | PCI_REG_FLAG_WRITE,
+ phb3_pcicfg_filter_rc_pref_window);
+ }
+ }
}
static void phb3_device_init(struct phb *phb, struct pci_device *dev)
@@ -456,8 +542,7 @@ static void phb3_device_init(struct phb *phb, struct pci_device *dev)
int aercap = 0;
/* Some special adapter tweaks for devices directly under the PHB */
- if (dev->primary_bus == 1)
- phb3_check_device_quirks(phb, dev);
+ phb3_check_device_quirks(phb, dev);
/* Figure out PCIe & AER capability */
if (pci_has_cap(dev, PCI_CFG_CAP_ID_EXP, false)) {
@@ -979,7 +1064,7 @@ static int64_t phb3_map_pe_dma_window_real(struct phb *phb,
uint64_t pci_mem_size)
{
struct phb3 *p = phb_to_phb3(phb);
- uint64_t end = pci_start_addr + pci_mem_size;
+ uint64_t end;
uint64_t tve;
if (pe_num >= PHB3_MAX_PE_NUM ||
@@ -1562,8 +1647,8 @@ static int64_t phb3_msi_get_xive(void *data,
uint32_t chip, index, irq;
uint64_t ive;
- chip = P8_IRQ_TO_CHIP(isn);
- index = P8_IRQ_TO_PHB(isn);
+ chip = p8_irq_to_chip(isn);
+ index = p8_irq_to_phb(isn);
irq = PHB3_IRQ_NUM(isn);
if (chip != p->chip_id ||
@@ -1592,8 +1677,8 @@ static int64_t phb3_msi_set_xive(void *data,
uint64_t *cache, ive_num, data64, m_server, m_prio;
uint32_t *ive;
- chip = P8_IRQ_TO_CHIP(isn);
- index = P8_IRQ_TO_PHB(isn);
+ chip = p8_irq_to_chip(isn);
+ index = p8_irq_to_phb(isn);
ive_num = PHB3_IRQ_NUM(isn);
if (p->state == PHB3_STATE_BROKEN || !p->tbl_rtt)
@@ -1656,8 +1741,8 @@ static int64_t phb3_lsi_get_xive(void *data,
uint32_t chip, index, irq;
uint64_t lxive;
- chip = P8_IRQ_TO_CHIP(isn);
- index = P8_IRQ_TO_PHB(isn);
+ chip = p8_irq_to_chip(isn);
+ index = p8_irq_to_phb(isn);
irq = PHB3_IRQ_NUM(isn);
if (chip != p->chip_id ||
@@ -1682,8 +1767,8 @@ static int64_t phb3_lsi_set_xive(void *data,
uint32_t chip, index, irq, entry;
uint64_t lxive;
- chip = P8_IRQ_TO_CHIP(isn);
- index = P8_IRQ_TO_PHB(isn);
+ chip = p8_irq_to_chip(isn);
+ index = p8_irq_to_phb(isn);
irq = PHB3_IRQ_NUM(isn);
if (p->state == PHB3_STATE_BROKEN)
@@ -1960,7 +2045,7 @@ static void phb3_setup_for_link_up(struct phb3 *p)
 	/* Clear spurious errors and enable PCIE port interrupts */
out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffdfffffffffffff);
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad5a800000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad52800000000000);
/* Mark link up */
p->has_link = true;
@@ -3288,15 +3373,6 @@ static int64_t phb3_set_capi_mode(struct phb *phb, uint64_t mode,
return OPAL_BUSY;
}
- xscom_read(p->chip_id, CAPP_ERR_STATUS_CTRL, &reg);
- if ((reg & PPC_BIT(5))) {
- PHBERR(p, "CAPP: recovery failed (%016llx)\n", reg);
- return OPAL_HARDWARE;
- } else if ((reg & PPC_BIT(0)) && (!(reg & PPC_BIT(1)))) {
- PHBDBG(p, "CAPP: recovery in progress\n");
- return OPAL_BUSY;
- }
-
if (mode == OPAL_PHB_CAPI_MODE_PCIE)
return OPAL_UNSUPPORTED;
@@ -3763,7 +3839,7 @@ static void phb3_init_utl(struct phb3 *p)
out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV, 0x5039000000000000);
if (p->has_link)
- out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad5a800000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad52800000000000);
else
out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad42800000000000);
@@ -4249,6 +4325,8 @@ static void phb3_create(struct dt_node *np)
struct phb3 *p = zalloc(sizeof(struct phb3));
size_t lane_eq_len;
struct dt_node *iplp;
+ struct proc_chip *chip;
+ int opal_id;
char *path;
assert(p);
@@ -4300,13 +4378,20 @@ static void phb3_create(struct dt_node *np)
p->has_link = false;
/* We register the PHB before we initialize it so we
- * get a useful OPAL ID for it
+ * get a useful OPAL ID for it. We use a different numbering here
+ * between Naples and Venice/Murano in order to leave room for the
+ * NPU on Naples.
*/
- pci_register_phb(&p->phb);
+ chip = next_chip(NULL); /* Just need any chip */
+ if (chip && chip->type == PROC_CHIP_P8_NAPLES)
+ opal_id = p->chip_id * 8 + p->index;
+ else
+ opal_id = p->chip_id * 4 + p->index;
+ pci_register_phb(&p->phb, opal_id);
/* Hello ! */
path = dt_get_path(np);
- PHBINF(p, "Found %s @%p\n", path, p->regs);
+ PHBINF(p, "Found %s @[%d:%d]\n", path, p->chip_id, p->index);
PHBINF(p, " M32 [0x%016llx..0x%016llx]\n",
p->mm1_base, p->mm1_base + p->mm1_size - 1);
PHBINF(p, " M64 [0x%016llx..0x%016llx]\n",
@@ -4484,7 +4569,7 @@ static void phb3_probe_pbcq(struct dt_node *pbcq)
/* Set the interrupt routing stuff, 8 relevant bits in mask
* (11 bits per PHB)
*/
- val = P8_CHIP_IRQ_PHB_BASE(gcid, pno);
+ val = p8_chip_irq_phb_base(gcid, pno);
val = (val << 45);
xscom_write(gcid, pe_xscom + 0x1a, val);
xscom_write(gcid, pe_xscom + 0x1b, 0xff00000000000000ul);
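
The fixed OPAL ID assignment above boils down to a per-chip stride; a sketch under the assumptions visible in the hunk (Naples detection is reduced to a boolean here, the real code checks chip->type against PROC_CHIP_P8_NAPLES):

#include <stdbool.h>
#include <stdint.h>

/* Naples reserves 8 IDs per chip to leave room for the NPU;
 * Venice/Murano keep 4 PHBs per chip. */
static int phb3_opal_id(uint32_t chip_id, uint32_t index, bool is_naples)
{
	return is_naples ? chip_id * 8 + index : chip_id * 4 + index;
}

/* e.g. chip 1, PHB index 2: 6 on Venice/Murano, 10 on Naples */
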
diff --git a/hw/psi.c b/hw/psi.c
index cb0dbab..0823ec8 100644
--- a/hw/psi.c
+++ b/hw/psi.c
@@ -523,7 +523,7 @@ void psi_irq_reset(void)
list_for_each(&psis, psi, list) {
/* Mask the interrupt & clean the XIVR */
- xivr = 0x000000ff00000000;
+ xivr = 0x000000ff00000000UL;
xivr |= P7_IRQ_BUID(psi->interrupt) << 16;
out_be64(psi->regs + PSIHB_XIVR, xivr);
diff --git a/hw/slw.c b/hw/slw.c
index a009090..b67097c 100644
--- a/hw/slw.c
+++ b/hw/slw.c
@@ -217,23 +217,6 @@ static bool slw_general_init(struct proc_chip *chip, struct cpu_thread *c)
prlog(PR_TRACE, "SLW: PMGP0 read 0x%016llx\n", tmp);
-
- /* Set CORE and ECO PFET Vret to select zero */
- rc = xscom_write(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_CORE_PFET_VRET), 0);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_INIT),
- "SLW: Failed to write PM_CORE_PFET_VRET\n");
- return false;
- }
- rc = xscom_write(chip->id,
- XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_CORE_ECO_VRET), 0);
- if (rc) {
- log_simple_error(&e_info(OPAL_RC_SLW_INIT),
- "SLW: Failed to write PM_CORE_ECO_VRET\n");
- return false;
- }
-
return true;
}
@@ -430,13 +413,13 @@ struct cpu_idle_states {
by searching PACA */
#define IDLE_USE_PMICR 0x00800000 /* Use SPR PMICR instruction */
-#define IDLE_FASTSLEEP_PMICR 0x0000002000000000
-#define IDLE_DEEPSLEEP_PMICR 0x0000003000000000
-#define IDLE_SLEEP_PMICR_MASK 0x0000003000000000
+#define IDLE_FASTSLEEP_PMICR 0x0000002000000000UL
+#define IDLE_DEEPSLEEP_PMICR 0x0000003000000000UL
+#define IDLE_SLEEP_PMICR_MASK 0x0000003000000000UL
-#define IDLE_FASTWINKLE_PMICR 0x0000000000200000
-#define IDLE_DEEPWINKLE_PMICR 0x0000000000300000
-#define IDLE_WINKLE_PMICR_MASK 0x0000000000300000
+#define IDLE_FASTWINKLE_PMICR 0x0000000000200000UL
+#define IDLE_DEEPWINKLE_PMICR 0x0000000000300000UL
+#define IDLE_WINKLE_PMICR_MASK 0x0000000000300000UL
static struct cpu_idle_states power7_cpu_idle_states[] = {
{ /* nap */
@@ -507,7 +490,7 @@ static struct cpu_idle_states power8_cpu_idle_states[] = {
};
/* Add device tree properties to describe idle states */
-static void add_cpu_idle_state_properties(void)
+void add_cpu_idle_state_properties(void)
{
struct dt_node *power_mgt;
struct cpu_idle_states *states;
@@ -518,7 +501,7 @@ static void add_cpu_idle_state_properties(void)
u8 i;
/* Buffers to hold idle state properties */
- char *name_buf;
+ char *name_buf, *alloced_name_buf;
u32 *latency_ns_buf;
u32 *residency_ns_buf;
u32 *flags_buf;
@@ -595,7 +578,8 @@ static void add_cpu_idle_state_properties(void)
*/
/* Allocate memory to idle state property buffers. */
- name_buf = (char *) malloc(nr_states * sizeof(char) * MAX_NAME_LEN);
+	alloced_name_buf = (char *) malloc(nr_states * sizeof(char) * MAX_NAME_LEN);
+ name_buf = alloced_name_buf;
latency_ns_buf = (u32 *) malloc(nr_states * sizeof(u32));
residency_ns_buf= (u32 *) malloc(nr_states * sizeof(u32));
flags_buf = (u32 *) malloc(nr_states * sizeof(u32));
@@ -661,7 +645,8 @@ static void add_cpu_idle_state_properties(void)
dt_add_property(power_mgt, "ibm,cpu-idle-state-pmicr-mask",
pmicr_mask_buf, num_supported_idle_states * sizeof(u64));
- free(name_buf);
+ assert(alloced_name_buf == name_buf);
+ free(alloced_name_buf);
free(latency_ns_buf);
free(residency_ns_buf);
free(flags_buf);
@@ -1234,7 +1219,5 @@ void slw_init(void)
for_each_chip(chip)
slw_init_chip(chip);
- add_cpu_idle_state_properties();
-
slw_init_timer();
}
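
The alloced_name_buf change above keeps the pointer originally returned by malloc() separate from any cursor that walks the buffer, so free() always gets the allocation start. A minimal sketch of the pattern being guarded against; the buffer size and the fill step are illustrative only:

#include <stdlib.h>
#include <string.h>

static void fill_names(void)
{
	char *alloced = malloc(256);	/* size is illustrative */
	char *cursor = alloced;

	if (!alloced)
		return;

	/* the fill loop may advance cursor as each name is appended */
	strcpy(cursor, "nap");
	cursor += strlen(cursor) + 1;

	free(alloced);			/* not free(cursor) */
}
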
diff --git a/hw/xscom.c b/hw/xscom.c
index c8e13be..a7a1705 100644
--- a/hw/xscom.c
+++ b/hw/xscom.c
@@ -422,7 +422,7 @@ int64_t xscom_read_cfam_chipid(uint32_t partid, uint32_t *chip_id)
* something up (Murano DD2.1)
*/
if (chip_quirk(QUIRK_NO_F000F))
- val = 0x221EF04980000000;
+ val = 0x221EF04980000000UL;
else
rc = xscom_read(partid, 0xf000f, &val);
diff --git a/include/ast.h b/include/ast.h
index 58adb6c..c7bf0cb 100644
--- a/include/ast.h
+++ b/include/ast.h
@@ -22,6 +22,7 @@
/* SPI Flash controller #1 (BMC) */
#define BMC_SPI_FCTL_BASE 0x1E620000
+#define BMC_SPI_FCTL_CE_CTRL (BMC_SPI_FCTL_BASE + 0x04)
#define BMC_SPI_FCTL_CTRL (BMC_SPI_FCTL_BASE + 0x10)
#define BMC_SPI_FREAD_TIMING (BMC_SPI_FCTL_BASE + 0x94)
#define BMC_FLASH_BASE 0x20000000
diff --git a/include/bitutils.h b/include/bitutils.h
index baa752b..a262db1 100644
--- a/include/bitutils.h
+++ b/include/bitutils.h
@@ -21,14 +21,17 @@
#ifdef __ASSEMBLY__
#define PPC_BIT(bit) (0x8000000000000000 >> (bit))
#define PPC_BIT32(bit) (0x80000000 >> (bit))
+#define PPC_BIT16(bit) (0x8000 >> (bit))
#define PPC_BIT8(bit) (0x80 >> (bit))
#else
#define PPC_BIT(bit) (0x8000000000000000UL >> (bit))
#define PPC_BIT32(bit) (0x80000000UL >> (bit))
+#define PPC_BIT16(bit) (0x8000UL >> (bit))
#define PPC_BIT8(bit) (0x80UL >> (bit))
#endif
#define PPC_BITMASK(bs,be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
#define PPC_BITMASK32(bs,be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+#define PPC_BITMASK16(bs,be) ((PPC_BIT16(bs) - PPC_BIT16(be))|PPC_BIT16(bs))
#define PPC_BITLSHIFT(be) (63 - (be))
#define PPC_BITLSHIFT32(be) (31 - (be))
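
The new 16-bit helpers follow the existing IBM bit numbering, where bit 0 is the most significant bit. A small standalone check, with the values worked out by hand:

#include <assert.h>

#define PPC_BIT16(bit)		(0x8000UL >> (bit))
#define PPC_BITMASK16(bs, be)	((PPC_BIT16(bs) - PPC_BIT16(be)) | PPC_BIT16(bs))

int main(void)
{
	assert(PPC_BIT16(0)  == 0x8000);
	assert(PPC_BIT16(15) == 0x0001);
	assert(PPC_BITMASK16(0, 3)  == 0xF000);	/* bits 0..3 set */
	assert(PPC_BITMASK16(4, 11) == 0x0FF0);	/* bits 4..11 set */
	return 0;
}
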
diff --git a/include/cec.h b/include/cec.h
index 87cdc0e..97da17a 100644
--- a/include/cec.h
+++ b/include/cec.h
@@ -26,10 +26,6 @@
struct io_hub;
struct io_hub_ops {
- /* OPAL_PCI_SET_HUB_TCE_MEMORY (p5ioc2 only) */
- int64_t (*set_tce_mem)(struct io_hub *hub, uint64_t address,
- uint64_t size);
-
/* OPAL_PCI_GET_HUB_DIAG_DATA */
int64_t (*get_diag_data)(struct io_hub *hub, void *diag_buffer,
uint64_t diag_buffer_len);
diff --git a/include/chip.h b/include/chip.h
index 5951ef0..5109e25 100644
--- a/include/chip.h
+++ b/include/chip.h
@@ -129,6 +129,12 @@ struct proc_chip {
uint32_t dbob_id;
uint32_t pcid;
+ /* If we expect to have an OCC (i.e. P8) and it is functional,
+	 * set TRUE. If something has told us it is not, set FALSE and
+	 * we cannot wait for OCCs to init. This is only going to be
+	 * FALSE in a simulator that doesn't simulate OCCs. */
+ bool occ_functional;
+
/* Used by hw/xscom.c */
uint64_t xscom_base;
diff --git a/include/fsp-mdst-table.h b/include/fsp-mdst-table.h
index 21cb88c..5989cdb 100644
--- a/include/fsp-mdst-table.h
+++ b/include/fsp-mdst-table.h
@@ -40,9 +40,9 @@
* defined in HDAT spec.
*/
struct dump_mdst_table {
- uint64_t addr;
- uint32_t type; /* DUMP_SECTION_* */
- uint32_t size;
+ __be64 addr;
+ __be32 type; /* DUMP_SECTION_* */
+ __be32 size;
};
#endif /* __FSPMDST_H */
diff --git a/include/hostservices.h b/include/hostservices.h
index e85abc3..d6bb3e3 100644
--- a/include/hostservices.h
+++ b/include/hostservices.h
@@ -36,4 +36,7 @@ void host_services_occ_base_setup(void);
#define HOMER_IMAGE_SIZE 0x400000 /* 4MB per-chip */
#define OCC_COMMON_SIZE 0x800000 /* 8MB */
+int find_master_and_slave_occ(uint64_t **master, uint64_t **slave,
+ int *nr_masters, int *nr_slaves);
+
#endif /* __HOSTSERVICES_H */
diff --git a/include/interrupts.h b/include/interrupts.h
index 9239b86..d144dcd 100644
--- a/include/interrupts.h
+++ b/include/interrupts.h
@@ -152,19 +152,26 @@
* are naturally power-of-two aligned
*
 * Our P8 Interrupt map thus consists of dividing the chip space
- * into 4 "blocks" of 2048 interrupts. Block 0 is for random chip
+ * into "blocks" of 2048 interrupts. Block 0 is for random chip
* interrupt sources (NX, PSI, OCC, ...) and keeps sources 0..15
- * clear to avoid conflits with IPIs etc.... Block 1..3 are assigned
- * to PHB 0..2 respectively.
+ * clear to avoid conflicts with IPIs etc.... Block 1..n are assigned
+ * to PHB 0..n respectively. The number of blocks is determined by the
+ * number of bits assigned to chips.
*
* That gives us an interrupt number made of:
- * 18 13 12 11 10 0
+ * 18 n+1 n 11 10 0
* | | | | | |
* +--------------------+------+-----------------------------+
* | Chip# | PHB# | IVE# |
* +--------------------+------+-----------------------------+
*
- * We can thus support a max of 2^6 = 64 chips
+ * Where n = 18 - p8_chip_id_bits
+ *
+ * For P8 we have 6 bits for Chip# as defined by p8_chip_id_bits. We
+ * therefore support a max of 2^6 = 64 chips.
+ *
+ * For P8NVL we have an extra PHB and so we assign 5 bits for Chip#
+ * and therefore support a max of 32 chips.
*
* Each PHB supports 2K interrupt sources, which is shared by
* LSI and MSI. With default configuration, MSI would use range
@@ -174,21 +181,20 @@
*
*/
-#define P8_CHIP_IRQ_BASE(chip) ((chip) << 13)
-#define P8_CHIP_IRQ_BLOCK_BASE(chip, block) (P8_CHIP_IRQ_BASE(chip) \
- | ((block) << 11))
-#define P8_IRQ_BLOCK_MISC 0
-#define P8_IRQ_BLOCK_PHB0 1
-#define P8_IRQ_BLOCK_PHB1 2
-#define P8_IRQ_BLOCK_PHB2 3
+uint32_t p8_chip_irq_block_base(uint32_t chip, uint32_t block);
+uint32_t p8_chip_irq_phb_base(uint32_t chip, uint32_t phb);
+uint32_t p8_irq_to_chip(uint32_t irq);
+uint32_t p8_irq_to_block(uint32_t irq);
+uint32_t p8_irq_to_phb(uint32_t irq);
-#define P8_CHIP_IRQ_PHB_BASE(chip, phb) (P8_CHIP_IRQ_BLOCK_BASE(chip,\
- (phb) + P8_IRQ_BLOCK_PHB0))
+/* Total number of bits in the P8 interrupt space */
+#define P8_IRQ_BITS 19
-#define P8_IRQ_TO_CHIP(irq) (((irq) >> 13) & 0x3f)
-#define P8_IRQ_TO_BLOCK(irq) (((irq) >> 11) & 0x03)
-#define P8_IRQ_TO_PHB(irq) (P8_IRQ_TO_BLOCK(irq) - \
- P8_IRQ_BLOCK_PHB0)
+/* Number of bits per block */
+#define P8_IVE_BITS 11
+
+#define P8_IRQ_BLOCK_MISC 0
+#define P8_IRQ_BLOCK_PHB_BASE 1
/* Assignment of the "MISC" block:
* -------------------------------
diff --git a/include/ipmi.h b/include/ipmi.h
index 99a9094..a6791e4 100644
--- a/include/ipmi.h
+++ b/include/ipmi.h
@@ -117,6 +117,7 @@
#define IPMI_GET_MESSAGE_FLAGS IPMI_CODE(IPMI_NETFN_APP, 0x31)
#define IPMI_GET_MESSAGE IPMI_CODE(IPMI_NETFN_APP, 0x33)
#define IPMI_READ_EVENT IPMI_CODE(IPMI_NETFN_APP, 0x35)
+#define IPMI_GET_BT_CAPS IPMI_CODE(IPMI_NETFN_APP, 0x36)
#define IPMI_SET_SENSOR_READING IPMI_CODE(IPMI_NETFN_SE, 0x30)
 /* AMI OEM commands. AMI uses NETFN 0x3a and 0x32 */
diff --git a/include/npu-regs.h b/include/npu-regs.h
new file mode 100644
index 0000000..f663a98
--- /dev/null
+++ b/include/npu-regs.h
@@ -0,0 +1,235 @@
+/* Copyright 2013-2015 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NPU_REGS_H
+#define __NPU_REGS_H
+
+/* Size of a single link */
+#define NPU_LINK_SIZE 0x40
+
+/* Link registers */
+#define NX_PB_ERR_RPT_0 0x00
+#define NX_PB_ERR_RPT_1 0x01
+#define NX_MMIO_BAR_0 0x02
+#define NX_MMIO_BAR_1 0x03
+#define NX_MMIO_BAR_BASE PPC_BITMASK(14,51)
+#define NX_MMIO_BAR_ENABLE PPC_BIT(52)
+#define NX_MMIO_BAR_SIZE PPC_BITMASK(53,55)
+#define NX_NODAL_BAR0 0x04
+#define NX_NODAL_BAR1 0x05
+#define NX_NODAL_BAR_ENABLE PPC_BIT(0)
+#define NX_NODAL_BAR_MASK PPC_BITMASK(1,14)
+#define NX_NODAL_BAR_BASE PPC_BITMASK(15,32)
+#define NX_GROUP_BAR0 0x06
+#define NX_GROUP_BAR1 0x07
+#define NX_GROUP_BAR_ENABLE PPC_BIT(0)
+#define NX_GROUP_BAR_MASK PPC_BITMASK(1,14)
+#define NX_GROUP_BAR_BASE PPC_BITMASK(15,32)
+#define NX_EPSILON_COUN 0x08
+#define NX_EPSILON_COUN_DISABLE PPC_BIT(6)
+#define NX_MISC_CONTROL 0x09
+#define NX_PB_DEBUG 0x0a
+#define NX_PB_ECC 0x0b
+#define NX_DEBUG_SNAPSHOT_0 0x0c
+#define NX_DEBUG_SNAPSHOT_1 0x0d
+#define NX_CS_CTL 0x0e
+#define NX_CONFIG_CQ 0x0f
+#define NX_MRBO0 0x10
+#define NX_MRBO1 0x11
+#define NX_AS_CMD_CFG 0x12
+#define NX_NP_BUID 0x13
+#define NP_BUID_ENABLE PPC_BIT(0)
+#define NP_BUID_BASE PPC_BITMASK(1,23)
+#define NX_TL_CMD_CR 0x20
+#define NX_TL_CMD_D_CR 0x21
+#define NX_TL_RSP_CR 0x22
+#define NX_TL_RSP_D_CR 0x23
+#define NX_DL_REG_ADDR 0x24
+#define NX_DL_REG_DATA 0x25
+#define NX_NTL_CONTROL 0x26
+#define NX_NTL_PMU_CONTROL 0x27
+#define NX_NTL_PMU_COUNT 0x28
+#define NX_NTL_ER_HOLD 0x29
+#define NX_NTL_FST_ERR 0x2a
+#define NX_NTL_ECC 0x2b
+#define NX_NTL_FST_MSK 0x2c
+
+/* NP AT register */
+#define NX_FIR 0x00
+#define NX_FIR_CLEAR 0x01
+#define NX_FIR_SET 0x02
+#define NX_FIR_MASK 0x03
+#define NX_FIR_MASK_CLR 0x04
+#define NX_FIR_MASK_SET 0x05
+#define NX_FIR_ACTION0 0x06
+#define NX_FIR_ACTION1 0x07
+#define NX_FIR_WOF 0x08
+#define NX_AT_PMU_CTRL 0x26
+#define NX_AT_PMU_CNT 0x27
+#define NX_AT_ERR_HOLD 0x28
+#define NX_AT_ERR_HOLD_RESET PPC_BIT(63)
+#define NX_AT_DEBUG 0x29
+#define NX_AT_ECC 0x2a
+#define NX_BAR 0x2b
+
+/* AT MMIO registers */
+#define NPU_LSI_SOURCE_ID 0x00100
+#define NPU_LSI_SRC_ID_BASE PPC_BITMASK(5,11)
+#define NPU_DMA_CHAN_STATUS 0x00110
+#define NPU_INTREP_TIMER 0x001f8
+#define NPU_DMARD_SYNC 0x00200
+#define NPU_DMARD_SYNC_START_RD PPC_BIT(0)
+#define NPU_DMARD_SYNC_RD PPC_BIT(1)
+#define NPU_DMARD_SYNC_START_WR PPC_BIT(2)
+#define NPU_DMARD_SYNC_WR PPC_BIT(3)
+#define NPU_TCE_KILL 0x00210
+#define NPU_IODA_ADDR 0x00220
+#define NPU_IODA_AD_AUTOINC PPC_BIT(0)
+#define NPU_IODA_AD_TSEL PPC_BITMASK(11,15)
+#define NPU_IODA_AD_TADR PPC_BITMASK(54,63)
+#define NPU_IODA_DATA0 0x00228
+#define NPU_XIVE_UPD 0x00248
+#define NPU_GEN_CAP 0x00250
+#define NPU_TCE_CAP 0x00258
+#define NPU_INT_CAP 0x00260
+#define NPU_EEH_CAP 0x00268
+#define NPU_VR 0x00800
+#define NPU_CTRLR 0x00810
+#define NPU_TCR 0x00880
+#define NPU_Q_DMA_R 0x00888
+#define NPU_AT_ESR 0x00c80
+#define NPU_AT_FESR 0x00c88
+#define NPU_AT_LR_ER 0x00c98
+#define NPU_AT_SI_ER 0x00ca0
+#define NPU_AT_FR_ER 0x00ca8
+#define NPU_AT_FE_ER 0x00cb0
+#define NPU_AT_ESMR 0x00cd0
+#define NPU_AT_FESMR 0x00cd8
+#define NPU_AT_I_LR0 0x00d00
+#define NPU_AT_I_LR1 0x00d08
+#define NPU_AT_I_LR2 0x00d10
+#define NPU_AT_I_LR3 0x00d18
+
+/* AT */
+#define NPU_AT_SCOM_OFFSET 0x180
+
+/* NTL */
+#define TL_CMD_CR 0x10000
+#define TL_CMD_D_CR 0x10008
+#define TL_RSP_CR 0x10010
+#define TL_RSP_D_CR 0x10018
+#define NTL_CONTROL 0x10020
+#define NTL_CONTROL_RESET PPC_BIT(0)
+
+/* IODA tables */
+#define NPU_IODA_TBL_LIST 1
+#define NPU_IODA_TBL_LXIVT 2
+#define NPU_IODA_TBL_PCT 4
+#define NPU_IODA_TBL_PESTB 8
+#define NPU_IODA_TBL_TVT 9
+#define NPU_IODA_TBL_TCD 10
+#define NPU_IODA_TBL_TDR 11
+#define NPU_IODA_TBL_PESTB_ADDR 12
+#define NPU_IODA_TBL_EA 16
+
+/* LXIVT */
+#define NPU_IODA_LXIVT_SERVER PPC_BITMASK(8,23)
+#define NPU_IODA_LXIVT_PRIORITY PPC_BITMASK(24,31)
+
+/* PCT */
+#define NPU_IODA_PCT_LINK_ENABLED PPC_BIT(0)
+#define NPU_IODA_PCT_PE PPC_BITMASK(2,3)
+
+/* TVT */
+#define NPU_IODA_TVT_TTA PPC_BITMASK(0,47)
+#define NPU_IODA_TVT_LEVELS PPC_BITMASK(48,50)
+#define NPU_IODA_TVE_1_LEVEL 0
+#define NPU_IODA_TVE_2_LEVELS 1
+#define NPU_IODA_TVE_3_LEVELS 2
+#define NPU_IODA_TVE_4_LEVELS 3
+#define NPU_IODA_TVT_SIZE PPC_BITMASK(51,55)
+#define NPU_IODA_TVT_PSIZE PPC_BITMASK(59,63)
+
+/* NDL Registers */
+#define NDL_STATUS 0xfff0
+#define NDL_CONTROL 0xfff4
+
+/* BAR Sizes */
+#define NX_MMIO_PL_SIZE 0x200000
+#define NX_MMIO_AT_SIZE 0x10000
+#define NX_MMIO_DL_SIZE 0x20000
+
+/* Translates a PHY SCOM address to an MMIO offset */
+#define PL_MMIO_ADDR(reg) (((reg >> 32) & 0xfffffull) << 1)
+
+/* PHY register scom offsets & fields */
+#define RX_PR_CNTL_PL 0x0002180000000000
+#define RX_PR_RESET PPC_BIT(63)
+
+#define TX_MODE1_PL 0x0004040000000000
+#define TX_LANE_PDWN PPC_BIT(48)
+
+#define TX_MODE2_PL 0x00040c0000000000
+#define TX_RXCAL PPC_BIT(57)
+#define TX_UNLOAD_CLK_DISABLE PPC_BIT(56)
+
+#define TX_CNTL_STAT2 0x00041c0000000000
+#define TX_FIFO_INIT PPC_BIT(48)
+
+#define RX_BANK_CONTROLS 0x0000f80000000000
+#define RX_LANE_ANA_PDWN PPC_BIT(54)
+
+#define RX_MODE 0x0002000000000000
+#define RX_LANE_DIG_PDWN PPC_BIT(48)
+
+#define RX_PR_MODE 0x0002100000000000
+#define RX_PR_PHASE_STEP PPC_BITMASK(60, 63)
+
+#define RX_A_DAC_CNTL 0x0000080000000000
+#define RX_PR_IQ_RES_SEL PPC_BITMASK(58, 60)
+
+#define RX_LANE_BUSY_VEC_0_15 0x000b000000000000
+#define TX_FFE_TOTAL_2RSTEP_EN 0x000c240000000000
+#define TX_FFE_TOTAL_ENABLE_P_ENC PPC_BITMASK(49,55)
+#define TX_FFE_TOTAL_ENABLE_N_ENC PPC_BITMASK(57,63)
+#define TX_FFE_PRE_2RSTEP_SEL 0x000c2c0000000000
+#define TX_FFE_PRE_P_SEL_ENC PPC_BITMASK(51,54)
+#define TX_FFE_PRE_N_SEL_ENC PPC_BITMASK(59,62)
+#define TX_FFE_MARGIN_2RSTEP_SEL 0x000c34000000000
+#define TX_FFE_MARGIN_PU_P_SEL_ENC PPC_BITMASK(51,55)
+#define TX_FFE_MARGIN_PD_N_SEL_ENC PPC_BITMASK(59,63)
+#define TX_IORESET_VEC_0_15 0x000d2c0000000000
+#define TX_IMPCAL_PB 0x000f040000000000
+#define TX_ZCAL_REQ PPC_BIT(49)
+#define TX_ZCAL_DONE PPC_BIT(50)
+#define TX_ZCAL_ERROR PPC_BIT(51)
+#define TX_IMPCAL_NVAL_PB 0x000f0c0000000000
+#define TX_ZCAL_N PPC_BITMASK(48,56)
+#define TX_IMPCAL_PVAL_PB 0x000f140000000000
+#define TX_ZCAL_P PPC_BITMASK(48,56)
+#define RX_EO_STEP_CNTL_PG 0x0008300000000000
+#define RX_EO_ENABLE_LATCH_OFFSET_CAL PPC_BIT(48)
+#define RX_EO_ENABLE_CM_COARSE_CAL PPC_BIT(57)
+#define RX_RUN_LANE_VEC_0_15 0x0009b80000000000
+#define RX_RECAL_ABORT_VEC_0_15 0x0009c80000000000
+#define RX_IORESET_VEC_0_15 0x0009d80000000000
+#define RX_EO_RECAL_PG 0x000a800000000000
+#define RX_INIT_DONE_VEC_0_15 0x000ac00000000000
+#define TX_IMPCAL_SWO1_PB 0x000f240000000000
+#define TX_ZCAL_SWO_EN PPC_BIT(48)
+#define TX_IMPCAL_SWO2_PB 0x000f2c0000000000
+
+#endif /* __NPU_REGS_H */
diff --git a/include/npu.h b/include/npu.h
new file mode 100644
index 0000000..389b732
--- /dev/null
+++ b/include/npu.h
@@ -0,0 +1,214 @@
+/* Copyright 2013-2015 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NPU_H
+#define __NPU_H
+
+/* Number of PEs supported */
+#define NPU_NUM_OF_PES 4
+
+/* Each brick has at most 2 MMIO BARs. BAR0 is always used to
+ * map the 128KB TL/DL registers. BAR1 is used to map either the PL or
+ * the AT registers, which are not exposed to the OS.
+ */
+#define NPU_BRICK_NUM_OF_BARS 2
+#define NPU_BRICK_TL_BAR_SIZE 0x20000
+#define NPU_BRICK_PL_BAR_SIZE 0x200000
+
+/* The config space of the NPU device is emulated. Different bits
+ * represent config register properties: read-only and
+ * write-one-to-clear.
+ */
+#define NPU_DEV_CFG_NORMAL 0
+#define NPU_DEV_CFG_RDONLY 1
+#define NPU_DEV_CFG_W1CLR 2
+#define NPU_DEV_CFG_MAX 3
+
+/* Bytes of the emulated NPU PCI device config space. We are
+ * emulating a PCI Express device, not a legacy one.
+ */
+#define NPU_DEV_CFG_SIZE 0x100
+
+/* Interrupt mapping
+ *
+ * NPU PHB doesn't support MSI interrupts. It only supports
+ * 8 LSI interrupts: [0, 3] for bricks' DL blocks. [4, 5]
+ * for reporting errors from DL blocks. [6, 7] for reporting
+ * errors from TL blocks, NPCQs and AT.
+ */
+#define NPU_LSI_IRQ_COUNT 8
+#define NPU_LSI_INT_DL0 0
+#define NPU_LSI_INT_DL1 1
+#define NPU_LSI_INT_DL2 2
+#define NPU_LSI_INT_DL3 3
+#define NPU_LSI_IRQ_MIN 0x7F0
+#define NPU_LSI_IRQ_MAX (NPU_LSI_IRQ_MIN + NPU_LSI_IRQ_COUNT - 1)
+#define NPU_LSI_IRQ_BASE(chip, phb) (P8_CHIP_IRQ_PHB_BASE(chip, phb) | NPU_LSI_IRQ_MIN)
+#define NPU_IRQ_NUM(irq) (irq & 0x7FF)
+
+/* NPU device capability descriptor. All PCI capabilities are
+ * organized as a linked list. Each PCI capability has a specific
+ * hook to populate it when initializing the NPU device.
+ */
+struct npu_dev;
+struct npu_dev_cap {
+ uint16_t id;
+ uint16_t start;
+ uint16_t end;
+ struct npu_dev *dev;
+ void (*populate)(struct npu_dev_cap *cap);
+ struct list_node link;
+};
+
+/* Config space access trap. */
+struct npu_dev_trap {
+ struct npu_dev *dev;
+ uint32_t start;
+ uint32_t end;
+ void *data;
+ int64_t (*read)(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t *data);
+ int64_t (*write)(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data);
+ struct list_node link;
+};
+
+struct npu_dev_bar {
+ uint32_t flags;
+ uint32_t xscom;
+ uint64_t base;
+ uint64_t size;
+ uint32_t bar_sz;
+ bool trapped;
+};
+
+/* Each device contains 2 links. The device is exposed as a
+ * standard PCIe device and its config space is emulated by skiboot.
+ */
+struct npu_dev {
+ uint32_t flags;
+ uint32_t index;
+ uint64_t xscom;
+ void *pl_base;
+ uint64_t pl_xscom_base;
+ struct npu_dev_bar bar;
+ struct phb *phb;
+
+ /* Device and function numbers are allocated based on GPU
+ * association */
+ uint32_t bdfn;
+
+ /* The link@x node */
+ struct dt_node *dt_node;
+
+ /* The GPU PCI device this NPU device is associated with */
+ struct pci_device *pd;
+
+ struct npu *npu;
+ uint8_t *config[NPU_DEV_CFG_MAX];
+ struct list_head capabilities;
+ struct list_head traps;
+
+ /* Which PHY lanes this device is associated with */
+ uint16_t lane_mask;
+
+ /* Used to store the currently running procedure number for
+ * this device. */
+ uint16_t procedure_number;
+
+ /* Used to store the step within a procedure that we are up
+ * to. */
+ uint16_t procedure_step;
+
+ /* Arbitrary data used by each procedure to track status. */
+ uint64_t procedure_data;
+
+ /* Used to timeout long running procedures. */
+ unsigned long procedure_tb;
+
+ uint32_t procedure_status;
+
+ uint8_t pe_num;
+};
+
+/* NPU PHB descriptor */
+struct npu {
+ uint32_t flags;
+ uint32_t index;
+ struct lock lock;
+ uint32_t chip_id;
+ uint64_t xscom_base;
+ uint64_t at_xscom;
+ void *at_regs;
+ uint32_t base_lsi;
+ uint64_t mm_base;
+ uint64_t mm_size;
+ uint32_t total_devices;
+ struct npu_dev *devices;
+
+ /* IODA cache */
+ uint64_t lxive_cache[8];
+ uint64_t pce_cache[6];
+ uint64_t tve_cache[NPU_NUM_OF_PES];
+
+ bool tx_zcal_complete[2];
+ bool fenced;
+
+ struct phb phb;
+};
+
+static inline struct npu *phb_to_npu(struct phb *phb)
+{
+ return container_of(phb, struct npu, phb);
+}
+
+static inline void npu_ioda_sel(struct npu *p, uint32_t table,
+ uint32_t addr, bool autoinc)
+{
+ out_be64(p->at_regs + NPU_IODA_ADDR,
+ (autoinc ? NPU_IODA_AD_AUTOINC : 0) |
+ SETFIELD(NPU_IODA_AD_TSEL, 0ul, table) |
+ SETFIELD(NPU_IODA_AD_TADR, 0ul, addr));
+}
+
+void npu_scom_init(struct npu_dev *dev);
+
+int64_t npu_dev_procedure_read(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t *data);
+
+int64_t npu_dev_procedure_write(struct npu_dev_trap *trap,
+ uint32_t offset,
+ uint32_t size,
+ uint32_t data);
+
+#define NPUDBG(p, fmt, a...) prlog(PR_DEBUG, "NPU%d: " fmt, \
+ (p)->phb.opal_id, ##a)
+#define NPUINF(p, fmt, a...) prlog(PR_INFO, "NPU%d: " fmt, \
+ (p)->phb.opal_id, ##a)
+#define NPUERR(p, fmt, a...) prlog(PR_ERR, "NPU%d: " fmt, \
+ (p)->phb.opal_id, ##a)
+
+#define NPUDEVDBG(p, fmt, a...) NPUDBG((p)->npu, fmt, ##a)
+#define NPUDEVINF(p, fmt, a...) NPUINF((p)->npu, fmt, ##a)
+#define NPUDEVERR(p, fmt, a...) NPUERR((p)->npu, fmt, ##a)
+
+#endif /* __NPU_H */
diff --git a/include/opal-internal.h b/include/opal-internal.h
index 3194676..5e41e10 100644
--- a/include/opal-internal.h
+++ b/include/opal-internal.h
@@ -24,8 +24,8 @@
/* An opal table entry */
struct opal_table_entry {
void *func;
- __be32 token;
- __be32 nargs;
+ u32 token;
+ u32 nargs;
};
#define opal_call(__tok, __func, __nargs) \
@@ -46,12 +46,12 @@ static struct opal_table_entry __e_##__func __used __section(".opal_table") = \
extern struct opal_table_entry __opal_table_start[];
extern struct opal_table_entry __opal_table_end[];
-extern __be64 opal_pending_events;
+extern uint64_t opal_pending_events;
extern struct dt_node *opal_node;
extern void opal_table_init(void);
-extern void opal_update_pending_evt(__be64 evt_mask, __be64 evt_values);
+extern void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values);
__be64 opal_dynamic_event_alloc(void);
void opal_dynamic_event_free(__be64 event);
extern void add_opal_node(void);
@@ -59,7 +59,7 @@ extern void add_opal_node(void);
#define opal_register(token, func, nargs) \
__opal_register((token) + 0*sizeof(func(__test_args##nargs)), \
(func), (nargs))
-extern void __opal_register(__be64 token, void *func, unsigned num_args);
+extern void __opal_register(uint64_t token, void *func, unsigned num_args);
/* Warning: no locking at the moment, do at init time only
*
diff --git a/include/p5ioc2-regs.h b/include/p5ioc2-regs.h
deleted file mode 100644
index 1628f7a..0000000
--- a/include/p5ioc2-regs.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __P5IOC2_REGS_H
-#define __P5IOC2_REGS_H
-
-/*
- * IO HUB registers
- *
- * Most (all) of those registers support an AND access
- * at address + 0x1000 and an OR access at address + 0x2000
- */
-#define P5IOC2_REG_AND 0x1000
-#define P5IOC2_REG_OR 0x2000
-
-/* Internal BARs */
-#define P5IOC2_BAR0 0x0100
-#define P5IOC2_BAR1 0x0108
-#define P5IOC2_BAR2 0x0110
-#define P5IOC2_BAR3 0x0118
-#define P5IOC2_BAR4 0x0120
-#define P5IOC2_BAR5 0x0128
-#define P5IOC2_BAR6 0x0130
-#define P5IOC2_BAR7 0x0138
-#define P5IOC2_BARM0 0x0180
-#define P5IOC2_BARM1 0x0188
-#define P5IOC2_BARM2 0x0190
-#define P5IOC2_BARM3 0x0198
-#define P5IOC2_BARM4 0x01a0
-#define P5IOC2_BARM5 0x01a8
-#define P5IOC2_BARM6 0x01b0
-#define P5IOC2_BARM7 0x01b8
-#define P5IOC2_BAR(n) (0x100 + ((n) << 3))
-#define P5IOC2_BARM(n) (0x180 + ((n) << 3))
-
-/* Routing table */
-#define P5IOC2_TxRTE(x,n) (0x200 + ((x) << 7) + ((n) << 3))
-#define P5IOC2_TxRTE_VALID PPC_BIT(47)
-
-/* BUID routing table */
-#define P5IOC2_BUIDRTE(n) (0x600 + ((n) << 3))
-#define P5IOC2_BUIDRTE_VALID PPC_BIT(47)
-#define P5IOC2_BUIDRTE_RR_EOI PPC_BIT(48)
-#define P5IOC2_BUIDRTE_RR_RET PPC_BIT(49)
-
-/* Others */
-#define P5IOC2_FIRMC 0x0008 /* FIR Mask Checkstop */
-#define P5IOC2_CTL 0x0030 /* Control register part 1 */
-#define P5IOC2_CTL2 0x00c8 /* Control register part 2 */
-#define P5IOC2_DIRA 0x0090 /* Cache dir. address */
-#define P5IOC2_DIRD 0x0098 /* Cache dir. data */
-#define P5IOC2_IBASE 0x0048 /* Interrupt base address */
-#define P5IOC2_IRBM 0x00d8 /* Interrupt re-issue broadcast mask */
-#define P5IOC2_SID 0x0038 /* P5IOC2 ID register */
-#define P5IOC2_SID_BUID_BASE PPC_BITMASK(14,22)
-#define P5IOC2_SID_BUID_MASK PPC_BITMASK(27,30)
-#define P5IOC2_SBUID 0x00f8 /* P5IOC2 HUB BUID */
-
-/* XIPM area */
-#define P5IOC2_BUCO 0x40008
-#define P5IOC2_MIIP 0x40000
-#define P5IOC2_XINM 0x40010
-
-/* Xin/Xout area */
-#define P5IOC2_XIXO 0xf0030
-#define P5IOC2_XIXO_ENH_TCE PPC_BIT(0)
-
-/*
- * Calgary registers
- *
- * CA0 is PCI-X and CA1 is PCIE, though the type can be discovered
- * from registers so we'll simply let it do so
- */
-
-#define CA_CCR 0x108
-#define CA_DEVBUID 0x118
-#define CA_DEVBUID_MASK PPC_BITMASK32(7,15)
-#define CA_TAR0 0x580
-#define CA_TAR_HUBID PPC_BITMASK(0,5)
-#define CA_TAR_ALTHUBID PPC_BITMASK(6,11)
-#define CA_TAR_TCE_ADDR PPC_BITMASK(16,48)
-#define CA_TAR_VALID PPC_BIT(60)
-#define CA_TAR_NUM_TCE PPC_BITMASK(61,63)
-#define CA_TAR1 0x588
-#define CA_TAR2 0x590
-#define CA_TAR3 0x598
-#define CA_TARn(n) (0x580 + ((n) << 3))
-
-#define CA_PHBID0 0x650
-#define CA_PHBID_PHB_ENABLE PPC_BIT32(0)
-#define CA_PHBID_ADDRSPACE_ENABLE PPC_BIT32(1)
-#define CA_PHBID_PHB_TYPE PPC_BITMASK32(4,7)
-#define CA_PHBTYPE_PCIX1_0 0
-#define CA_PHBTYPE_PCIX2_0 1
-#define CA_PHBTYPE_PCIE_G1 4
-#define CA_PHBTYPE_PCIE_G2 5
-/* PCI-X bits */
-#define CA_PHBID_XMODE_EMBEDDED PPC_BIT32(8)
-#define CA_PHBID_XBUS_64BIT PPC_BIT32(9)
-#define CA_PHBID_XBUS_266MHZ PPC_BIT32(10)
-/* PCI-E bits */
-#define CA_PHBID_EWIDTH PPC_BITMASK32(8,10)
-#define CA_PHB_EWIDTH_X4 0
-#define CA_PHB_EWIDTH_X8 1
-#define CA_PHB_EWIDTH_X16 2
-#define CA_PHBID1 0x658
-#define CA_PHBID2 0x660
-#define CA_PHBID3 0x668
-#define CA_PHBIDn(n) (0x650 + ((n) << 3))
-
-/* PHB n reg base inside CA */
-#define CA_PHBn_REGS(n) (0x8000 + ((n) << 12))
-
-/*
- * P5IOC2 PHB registers
- */
-#define CAP_BUID 0x100
-#define CAP_BUID_MASK PPC_BITMASK32(7,15)
-#define CAP_MSIBASE 0x108 /* Undocumented ! */
-#define CAP_DMACSR 0x110
-#define CAP_PLSSR 0x120
-#define CAP_PCADR 0x140
-#define CAP_PCADR_ENABLE PPC_BIT32(0)
-#define CAP_PCADR_FUNC PPC_BITMASK32(21,23)
-#define CAP_PCADR_BDFN PPC_BITMASK32(8,23) /* bus,dev,func */
-#define CAP_PCADR_EXTOFF PPC_BITMASK32(4,7)
-#define CAP_PCDAT 0x130
-#define CAP_PCFGRW 0x160
-#define CAP_PCFGRW_ERR_RECOV_EN PPC_BIT32(1)
-#define CAP_PCFGRW_TCE_EN PPC_BIT32(2)
-#define CAP_PCFGRW_FREEZE_EN PPC_BIT32(3)
-#define CAP_PCFGRW_MMIO_FROZEN PPC_BIT32(4)
-#define CAP_PCFGRW_DMA_FROZEN PPC_BIT32(5)
-#define CAP_PCFGRW_ENHANCED_CFG_EN PPC_BIT32(6)
-#define CAP_PCFGRW_DAC_DISABLE PPC_BIT32(7)
-#define CAP_PCFGRW_2ND_MEM_SPACE_EN PPC_BIT32(9)
-#define CAP_PCFGRW_MASK_PLSSR_IRQ PPC_BIT32(10)
-#define CAP_PCFGRW_MASK_CSR_IRQ PPC_BIT32(11)
-#define CAP_PCFGRW_IO_SPACE_DIABLE PPC_BIT32(12)
-#define CAP_PCFGRW_SZ_MASK_IS_LIMIT PPC_BIT32(13)
-#define CAP_PCFGRW_MSI_EN PPC_BIT32(14)
-#define CAP_IOAD_L 0x170
-#define CAP_IOAD_H 0x180
-#define CAP_MEM1_L 0x190
-#define CAP_MEM1_H 0x1a0
-#define CAP_IOSZ 0x1b0
-#define CAP_MSZ1 0x1c0
-#define CAP_MEM_ST 0x1d0
-#define CAP_IO_ST 0x1e0
-#define CAP_AER 0x200
-#define CAP_BPR 0x210
-#define CAP_CRR 0x270
-#define CAP_CRR_RESET1 PPC_BIT32(0)
-#define CAP_CRR_RESET2 PPC_BIT32(1)
-#define CAP_XIVR0 0x400
-#define CAP_XIVR_PRIO 0x000000ff
-#define CAP_XIVR_SERVER 0x0000ff00
-#define CAP_XIVRn(n) (0x400 + ((n) << 4))
-#define CAP_MVE0 0x500
-#define CAP_MVE_VALID PPC_BIT32(0)
-#define CAP_MVE_TBL_OFF PPC_BITMASK32(13,15)
-#define CAP_MVE_NUM_INT PPC_BITMASK32(18,19)
-#define CAP_MVE1 0x510
-#define CAP_MODE0 0x880
-#define CAP_MODE1 0x890
-#define CAP_MODE2 0x8a0
-#define CAP_MODE3 0x8b0
-
-/*
- * SHPC Registers
- */
-#define SHPC_LOGICAL_SLOT 0xb40
-#define SHPC_LOGICAL_SLOT_STATE 0x00000003
-#define SHPC_SLOT_STATE_POWER_ONLY 1
-#define SHPC_SLOT_STATE_ENABLED 2
-#define SHPC_SLOT_STATE_DISABLED 3
-#define SHPC_LOGICAL_SLOT_PRSNT 0x000000c00
-#define SHPC_SLOT_PRSTN_7_5W 0
-#define SHPC_SLOT_PRSTN_25W 1
-#define SHPC_SLOT_STATE_15W 2
-#define SHPC_SLOT_STATE_EMPTY 3
-
-/* UTL registers */
-#define UTL_SYS_BUS_CONTROL 0xc00
-#define UTL_STATUS 0xc04
-#define UTL_SYS_BUS_AGENT_STATUS 0xc08
-#define UTL_SYS_BUS_AGENT_ERR_EN 0xc0c
-#define UTL_SYS_BUS_AGENT_IRQ_EN 0xc10
-#define UTL_SYS_BUS_BURST_SZ_CONF 0xc20
-#define UTL_REVISION_ID 0xc24
-#define UTL_TX_NON_POST_DEBUG_STAT1 0xc30
-#define UTL_TX_NON_POST_DEBUG_STAT2 0xc34
-#define UTL_GBIF_READ_REQ_DEBUG 0xc38
-#define UTL_GBIF_WRITE_REQ_DEBUG 0xc3c
-#define UTL_GBIF_TX_COMP_DEBUG 0xc40
-#define UTL_GBIF_RX_COMP_DEBUG 0xc44
-#define UTL_OUT_POST_HDR_BUF_ALLOC 0xc60
-#define UTL_OUT_POST_DAT_BUF_ALLOC 0xc68
-#define UTL_IN_POST_HDR_BUF_ALLOC 0xc70
-#define UTL_IN_POST_DAT_BUF_ALLOC 0xc78
-#define UTL_OUT_NP_BUF_ALLOC 0xc80
-#define UTL_IN_NP_BUF_ALLOC 0xc88
-#define UTL_PCIE_TAGS_ALLOC 0xc90
-#define UTL_GBIF_READ_TAGS_ALLOC 0xc98
-#define UTL_PCIE_PORT_CONTROL 0xca0
-#define UTL_PCIE_PORT_STATUS 0xca4
-#define UTL_PCIE_PORT_ERR_EN 0xca8
-#define UTL_PCIE_PORT_IRQ_EN 0xcac
-#define UTL_RC_STATUS 0xcb0
-#define UTL_RC_ERR_EN 0xcb4
-#define UTL_RC_IRQ_EN 0xcb8
-#define UTL_PCI_PM_CONTROL 0xcc8
-#define UTL_PCIE_PORT_ID 0xccc
-#define UTL_TLP_DEBUG 0xcd0
-#define UTL_VC_CTL_DEBUG 0xcd4
-#define UTL_NP_BUFFER_DEBUG 0xcd8
-#define UTL_POSTED_BUFFER_DEBUG 0xcdc
-#define UTL_TX_FIFO_DEBUG 0xce0
-#define UTL_TLP_COMPL_DEBUG 0xce4
-
-#endif /* __P5IOC2_REGS_H */
diff --git a/include/p5ioc2.h b/include/p5ioc2.h
deleted file mode 100644
index fb9ed1b..0000000
--- a/include/p5ioc2.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __P5IOC2_H
-#define __P5IOC2_H
-
-#include <stdint.h>
-#include <cec.h>
-#include <io.h>
-#include <cec.h>
-#include <pci.h>
-#include <lock.h>
-#include <device.h>
-
-#include <ccan/container_of/container_of.h>
-
-/*
- * Various definitions which are the result of various
- * things we have hard wired (routing etc...)
- */
-
-/* It looks like our registers are at an offset from GX BAR 0 ... */
-#define P5IOC2_REGS_OFFSET 0x01F00000
-
-#define P5IOC2_CA0_REG_OFFSET 0 /* From BAR6, R0 */
-#define P5IOC2_CA1_REG_OFFSET 0x01000000 /* From BAR6, R1 */
-#define P5IOC2_CA0_MM_OFFSET 0 /* From BAR0, R0 and 1 */
-#define P5IOC2_CA1_MM_OFFSET 0x400000000ul /* From BAR0, R1 and 2 */
-#define P5IOC2_CA_PHB_COUNT 4
-#define P5IOC2_CA0_RIO_ID 2
-#define P5IOC2_CA1_RIO_ID 3
-#define P5IOC2_CA0_BUID 0x10
-#define P5IOC2_CA1_BUID 0x20
-
-/*
- * Our memory space is slightly different than pHyp
- * (or even BML). We do as follow:
- *
- * - IO space is in the Calgary MMIO, at (phb_index +1) * 1M
- * (pHyp seems to mangle the IO space location) and is always
- * 1M in size mapping to PCI 0
- *
- * - Memory space is in the BAR0 mapped region. Each PHB gets
- * allocated a 4G window at base + (phb_index * 4G). It uses
- * a portion of that space based on the chosen size of the
- * MMIO space, typically 2G.
- */
-#define MM_WINDOW_SIZE 0x100000000ul
-#define MM_PCI_START 0x80000000
-#define MM_PCI_SIZE 0x80000000
-#define IO_PCI_START 0x00000000
-#define IO_PCI_SIZE 0x00100000
-
-/*
- * CAn interrupts
- *
- * Within Calgary BUID space
- */
-#define P5IOC2_CA_HOST_IRQ 0
-#define P5IOC2_CA_SPCN_IRQ 1
-#define P5IOC2_CA_PERF_IRQ 2
-
-/*
- * The PHB states are similar to P7IOC, see the explanation
- * in p7ioc.h
- */
-enum p5ioc2_phb_state {
- /* First init state */
- P5IOC2_PHB_STATE_UNINITIALIZED,
-
- /* During PHB HW inits */
- P5IOC2_PHB_STATE_INITIALIZING,
-
- /* Set if the PHB is for some reason unusable */
- P5IOC2_PHB_STATE_BROKEN,
-
- /* Normal PHB functional state */
- P5IOC2_PHB_STATE_FUNCTIONAL,
-};
-
-/*
- * Structure for a PHB
- */
-
-struct p5ioc2;
-
-struct p5ioc2_phb {
- bool active; /* Is this PHB functional ? */
- bool is_pcie;
- uint8_t ca; /* CA0 or CA1 */
- uint8_t index; /* 0..3 index inside CA */
- void *ca_regs; /* Calgary regs */
- void *regs; /* PHB regs */
- struct lock lock;
- uint32_t buid;
- uint64_t mm_base;
- uint64_t io_base;
- int64_t ecap; /* cached PCI-E cap offset */
- int64_t aercap; /* cached AER ecap offset */
- enum p5ioc2_phb_state state;
- uint64_t delay_tgt_tb;
- uint64_t retries;
- uint64_t xive_cache[16];
- struct p5ioc2 *ioc;
- struct phb phb;
-};
-
-static inline struct p5ioc2_phb *phb_to_p5ioc2_phb(struct phb *phb)
-{
- return container_of(phb, struct p5ioc2_phb, phb);
-}
-
-extern void p5ioc2_phb_setup(struct p5ioc2 *ioc, struct p5ioc2_phb *p,
- uint8_t ca, uint8_t index, bool active,
- uint32_t buid);
-
-/*
- * State structure for P5IOC2 IO HUB
- */
-struct p5ioc2 {
- /* Device node */
- struct dt_node *dt_node;
-
- /* MMIO regs for the chip */
- void *regs;
-
- /* BAR6 (matches GX BAR 1) is used for internal Calgary MMIO and
- * for PCI IO space.
- */
- uint64_t bar6;
-
- /* BAR0 (matches GX BAR 2) is used for PCI memory space */
- uint64_t bar0;
-
- /* Calgary 0 and 1 registers. We assume their BBAR values as such
- * that CA0 is at bar6 and CA1 at bar6 + 16M
- */
- void* ca0_regs;
- void* ca1_regs;
-
- /* The large MM regions assigned off bar0 to CA0 and CA1 for use
- * by their PHBs (16G each)
- */
- uint64_t ca0_mm_region;
- uint64_t ca1_mm_region;
-
- /* BUID base for the PHB. This does include the top bits
- * (chip, GX bus ID, etc...). This is initialized from the
- * SPIRA.
- */
- uint32_t buid_base;
-
- /* TCE region set by the user */
- uint64_t tce_base;
- uint64_t tce_size;
-
- /* Calgary 0 and 1 PHBs */
- struct p5ioc2_phb ca0_phbs[P5IOC2_CA_PHB_COUNT];
- struct p5ioc2_phb ca1_phbs[P5IOC2_CA_PHB_COUNT];
-
- uint32_t host_chip;
- uint32_t gx_bus;
- struct io_hub hub;
-};
-
-static inline struct p5ioc2 *iohub_to_p5ioc2(struct io_hub *hub)
-{
- return container_of(hub, struct p5ioc2, hub);
-}
-
-#endif /* __P5IOC2_H */
diff --git a/include/pci-cfg.h b/include/pci-cfg.h
index c705d25..27c0f74 100644
--- a/include/pci-cfg.h
+++ b/include/pci-cfg.h
@@ -113,6 +113,9 @@
#define PCICAP_SUBSYS_VID_VENDOR 4
#define PCICAP_SUBSYS_VID_DEVICE 6
+/* Vendor specific capability */
+#define PCI_CFG_CAP_ID_VENDOR 9
+
/* PCI Express capability */
#define PCI_CFG_CAP_ID_EXP 0x10
/* PCI Express capability fields */
@@ -483,4 +486,14 @@
#define PCIECAP_AER_TLP_PFX_LOG2 0x40
#define PCIECAP_AER_TLP_PFX_LOG3 0x44
+/* Vendor specific extend capability */
+#define PCIECAP_ID_VNDR 0x0b
+#define PCIECAP_VNDR_HDR 0x04
+#define PCIECAP_VNDR_HDR_ID_MASK 0x0000ffff
+#define PCIECAP_VNDR_HDR_ID_LSH 0
+#define PCIECAP_VNDR_HDR_REV_MASK 0x000f0000
+#define PCIECAP_VNDR_HDR_REV_LSH 16
+#define PCIECAP_VNDR_HDR_LEN_MASK 0xfff00000
+#define PCIECAP_VNDR_HDR_LEN_LSH 20
+
#endif /* __PCI_CFG_H */
diff --git a/include/pci.h b/include/pci.h
index 2385163..4640dda 100644
--- a/include/pci.h
+++ b/include/pci.h
@@ -90,6 +90,25 @@ struct pci_slot_info {
int slot_index;
};
+struct pci_device;
+struct pci_cfg_reg_filter;
+
+typedef void (*pci_cfg_reg_func)(struct pci_device *pd,
+ struct pci_cfg_reg_filter *pcrf,
+ uint32_t offset, uint32_t len,
+ uint32_t *data, bool write);
+struct pci_cfg_reg_filter {
+ uint32_t flags;
+#define PCI_REG_FLAG_READ 0x1
+#define PCI_REG_FLAG_WRITE 0x2
+#define PCI_REG_FLAG_MASK 0x3
+ uint32_t start;
+ uint32_t len;
+ uint8_t *data;
+ pci_cfg_reg_func func;
+ struct list_node link;
+};
+
/*
* While this might not be necessary in the long run, the existing
* Linux kernels expect us to provide a device-tree that contains
@@ -119,10 +138,18 @@ struct pci_device {
uint8_t subordinate_bus;
uint32_t scan_map;
+ uint32_t vdid;
+ uint32_t sub_vdid;
+ uint32_t class;
uint64_t cap_list;
uint32_t cap[64];
uint32_t mps; /* Max payload size capability */
+ uint32_t pcrf_start;
+ uint32_t pcrf_end;
+ struct list_head pcrf;
+
+ struct dt_node *dn;
struct pci_slot_info *slot_info;
struct pci_device *parent;
struct list_head children;
@@ -172,14 +199,14 @@ struct pci_lsi_state {
#define MAX_INT_SIZE 2
uint32_t int_size; /* #cells */
uint32_t int_val[4][MAX_INT_SIZE]; /* INTA...INTD */
- uint32_t int_parent[4];
+ uint32_t int_parent[4];
};
/*
* NOTE: All PCI functions return negative OPAL error codes
*
* In addition, some functions may return a positive timeout
- * value or some other state information, see the description
+ * value or some other state information, see the description
* of individual functions. If nothing is specified, it's
* just an error code or 0 (success).
*
@@ -240,6 +267,12 @@ struct phb_ops {
void (*device_init)(struct phb *phb, struct pci_device *device);
/*
+ * Device node fixup is called when the PCI device node is being
+ * populated
+ */
+ void (*device_node_fixup)(struct phb *phb, struct pci_device *pd);
+
+ /*
* EEH methods
*
* The various arguments are identical to the corresponding
@@ -491,9 +524,15 @@ extern struct pci_device *pci_walk_dev(struct phb *phb,
void *userdata);
extern struct pci_device *pci_find_dev(struct phb *phb, uint16_t bdfn);
extern void pci_restore_bridge_buses(struct phb *phb);
+extern struct pci_cfg_reg_filter *pci_find_cfg_reg_filter(struct pci_device *pd,
+ uint32_t start, uint32_t len);
+extern struct pci_cfg_reg_filter *pci_add_cfg_reg_filter(struct pci_device *pd,
+ uint32_t start, uint32_t len,
+ uint32_t flags, pci_cfg_reg_func func);
/* Manage PHBs */
-extern int64_t pci_register_phb(struct phb *phb);
+#define OPAL_DYNAMIC_PHB_ID (~0)
+extern int64_t pci_register_phb(struct phb *phb, int opal_id);
extern int64_t pci_unregister_phb(struct phb *phb);
extern struct phb *pci_get_phb(uint64_t phb_id);
static inline void pci_put_phb(struct phb *phb __unused) { }
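
A config-space filter built on the API declared above takes the pci_cfg_reg_func shape and is attached with pci_add_cfg_reg_filter(). A minimal hypothetical sketch; the 0x40/4 register range and the zeroing behaviour are made up for illustration, only the types and flags come from the header:

/* Hide a 4-byte register at offset 0x40 from config reads. */
static void my_cfg_filter(struct pci_device *pd __unused,
			  struct pci_cfg_reg_filter *pcrf __unused,
			  uint32_t offset __unused, uint32_t len __unused,
			  uint32_t *data, bool write)
{
	if (!write)
		*data = 0;
}

/* e.g. called from a PHB's device_init hook for a matching device */
static void attach_filter(struct pci_device *pd)
{
	pci_add_cfg_reg_filter(pd, 0x40, 4,
			       PCI_REG_FLAG_READ | PCI_REG_FLAG_WRITE,
			       my_cfg_filter);
}
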
diff --git a/include/phb3.h b/include/phb3.h
index b2aae15..44ac52b 100644
--- a/include/phb3.h
+++ b/include/phb3.h
@@ -52,9 +52,9 @@
#define PHB3_LSI_IRQ_COUNT 8
#define PHB3_LSI_IRQ_MAX (PHB3_LSI_IRQ_MIN+PHB3_LSI_IRQ_COUNT-1)
-#define PHB3_MSI_IRQ_BASE(chip, phb) (P8_CHIP_IRQ_PHB_BASE(chip, phb) | \
+#define PHB3_MSI_IRQ_BASE(chip, phb) (p8_chip_irq_phb_base(chip, phb) | \
PHB3_MSI_IRQ_MIN)
-#define PHB3_LSI_IRQ_BASE(chip, phb) (P8_CHIP_IRQ_PHB_BASE(chip, phb) | \
+#define PHB3_LSI_IRQ_BASE(chip, phb) (p8_chip_irq_phb_base(chip, phb) | \
PHB3_LSI_IRQ_MIN)
#define PHB3_IRQ_NUM(irq) (irq & 0x7FF)
diff --git a/include/skiboot.h b/include/skiboot.h
index 4eec6db..a85fafc 100644
--- a/include/skiboot.h
+++ b/include/skiboot.h
@@ -197,15 +197,16 @@ extern void init_shared_sprs(void);
extern void init_replicated_sprs(void);
/* Various probe routines, to replace with an initcall system */
-extern void probe_p5ioc2(void);
extern void probe_p7ioc(void);
extern void probe_phb3(void);
extern int phb3_preload_capp_ucode(void);
extern void phb3_preload_vpd(void);
+extern void probe_npu(void);
extern void uart_init(bool enable_interrupt);
extern void homer_init(void);
extern void occ_pstates_init(void);
extern void slw_init(void);
+extern void add_cpu_idle_state_properties(void);
extern void occ_fsp_init(void);
extern void lpc_rtc_init(void);
@@ -264,4 +265,3 @@ extern bool slw_timer_ok(void);
extern void fake_rtc_init(void);
#endif /* __SKIBOOT_H */
-
diff --git a/include/types.h b/include/types.h
index 36dc81d..07a9db6 100644
--- a/include/types.h
+++ b/include/types.h
@@ -17,11 +17,12 @@
#ifndef __TYPES_H
#define __TYPES_H
#include <ccan/short_types/short_types.h>
+#include <ccan/endian/endian.h>
/* These are currently just for clarity, but we could apply sparse. */
-typedef u16 __be16;
-typedef u32 __be32;
-typedef u64 __be64;
+typedef beint16_t __be16;
+typedef beint32_t __be32;
+typedef beint64_t __be64;
#endif /* __TYPES_H */
diff --git a/libc/stdio/Makefile.inc b/libc/stdio/Makefile.inc
index 7c3cb08..d2aee0f 100644
--- a/libc/stdio/Makefile.inc
+++ b/libc/stdio/Makefile.inc
@@ -13,9 +13,9 @@
SUBDIRS += $(LIBCDIR)/stdio
-STDIO_OBJS = fscanf.o vfprintf.o vsnprintf.o fprintf.o \
- setvbuf.o fputc.o puts.o fputs.o putchar.o scanf.o \
- stdchnls.o vfscanf.o vsscanf.o fileno.o snprintf.o
+STDIO_OBJS = vfprintf.o vsnprintf.o fprintf.o \
+ setvbuf.o fputc.o puts.o fputs.o putchar.o \
+ stdchnls.o fileno.o snprintf.o
STDIO = $(LIBCDIR)/stdio/built-in.o
$(STDIO): $(STDIO_OBJS:%=$(LIBCDIR)/stdio/%)
diff --git a/libc/stdio/fscanf.c b/libc/stdio/fscanf.c
deleted file mode 100644
index 321b163..0000000
--- a/libc/stdio/fscanf.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2004, 2008 IBM Corporation
- * All rights reserved.
- * This program and the accompanying materials
- * are made available under the terms of the BSD License
- * which accompanies this distribution, and is available at
- * http://www.opensource.org/licenses/bsd-license.php
- *
- * Contributors:
- * IBM Corporation - initial implementation
- *****************************************************************************/
-
-#include <stdio.h>
-
-int
-fscanf(FILE *stream, const char *fmt, ...)
-{
- int count;
- va_list ap;
-
- va_start(ap, fmt);
- count = vfscanf(stream, fmt, ap);
- va_end(ap);
-
- return count;
-}
diff --git a/libc/stdio/scanf.c b/libc/stdio/scanf.c
deleted file mode 100644
index 96b6399..0000000
--- a/libc/stdio/scanf.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2004, 2008 IBM Corporation
- * All rights reserved.
- * This program and the accompanying materials
- * are made available under the terms of the BSD License
- * which accompanies this distribution, and is available at
- * http://www.opensource.org/licenses/bsd-license.php
- *
- * Contributors:
- * IBM Corporation - initial implementation
- *****************************************************************************/
-
-#include <stdio.h>
-
-int
-scanf(const char *fmt, ...)
-{
- int count;
- va_list ap;
-
- va_start(ap, fmt);
- count = vfscanf(stdin, fmt, ap);
- va_end(ap);
-
- return count;
-}
diff --git a/libc/stdio/vfscanf.c b/libc/stdio/vfscanf.c
deleted file mode 100644
index 85ca8be..0000000
--- a/libc/stdio/vfscanf.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2004, 2008 IBM Corporation
- * All rights reserved.
- * This program and the accompanying materials
- * are made available under the terms of the BSD License
- * which accompanies this distribution, and is available at
- * http://www.opensource.org/licenses/bsd-license.php
- *
- * Contributors:
- * IBM Corporation - initial implementation
- *****************************************************************************/
-
-#include "string.h"
-#include "ctype.h"
-#include "stdlib.h"
-#include "stdio.h"
-#include "unistd.h"
-
-
-static int
-_getc(FILE * stream)
-{
- int count;
- char c;
-
- if (stream->mode == _IONBF || stream->buf == NULL) {
- if (read(stream->fd, &c, 1) == 1)
- return (int) c;
- else
- return EOF;
- }
-
- if (stream->pos == 0 || stream->pos >= BUFSIZ ||
- stream->buf[stream->pos] == '\0') {
- count = read(stream->fd, stream->buf, BUFSIZ);
- if (count < 0)
- count = 0;
- if (count < BUFSIZ)
- stream->buf[count] = '\0';
- stream->pos = 0;
- }
-
- return stream->buf[stream->pos++];
-}
-
-static void
-_ungetc(int ch, FILE * stream)
-{
- if (stream->mode != _IONBF && stream->pos > 0) {
- if (stream->pos < BUFSIZ)
- stream->buf[stream->pos] = ch;
- stream->pos--;
- }
-}
-
-static int
-_is_voidage(int ch)
-{
- if (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\0')
- return 1;
- else
- return 0;
-}
-
-
-static int
-_scanf(FILE * stream, const char *fmt, va_list * ap)
-{
- int i = 0;
- int length = 0;
-
- fmt++;
-
- while (*fmt != '\0') {
-
- char tbuf[256];
- char ch;
-
- switch (*fmt) {
- case 'd':
- case 'i':
- ch = _getc(stream);
- if (length == 0) {
- while (!_is_voidage(ch) && isdigit(ch)) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- } else {
- while (!_is_voidage(ch) && i < length
- && isdigit(ch)) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- }
- /* We tried to understand what this is good for...
- * but we did not. We know for sure that it does not
- * work on SLOF if this is active. */
- /* _ungetc(ch, stream); */
- tbuf[i] = '\0';
-
- /* ch = _getc(stream); */
- if (!_is_voidage(ch))
- _ungetc(ch, stream);
-
- if (strlen(tbuf) == 0)
- return 0;
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 10);
- break;
- case 'X':
- case 'x':
- ch = _getc(stream);
- if (length == 0) {
- while (!_is_voidage(ch) && isxdigit(ch)) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- } else {
- while (!_is_voidage(ch) && i < length
- && isxdigit(ch)) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- }
- /* _ungetc(ch, stream); */
- tbuf[i] = '\0';
-
- /* ch = _getc(stream); */
- if (!_is_voidage(ch))
- _ungetc(ch, stream);
-
- if (strlen(tbuf) == 0)
- return 0;
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 16);
- break;
- case 'O':
- case 'o':
- ch = _getc(stream);
- if (length == 0) {
- while (!_is_voidage(ch)
- && !(ch < '0' || ch > '7')) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- } else {
- while (!_is_voidage(ch) && i < length
- && !(ch < '0' || ch > '7')) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- }
- /* _ungetc(ch, stream); */
- tbuf[i] = '\0';
-
- /* ch = _getc(stream); */
- if (!_is_voidage(ch))
- _ungetc(ch, stream);
-
- if (strlen(tbuf) == 0)
- return 0;
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 8);
- break;
- case 'c':
- ch = _getc(stream);
- while (_is_voidage(ch))
- ch = _getc(stream);
-
- *(va_arg(*ap, char *)) = ch;
-
- ch = _getc(stream);
- if (!_is_voidage(ch))
- _ungetc(ch, stream);
-
- break;
- case 's':
- ch = _getc(stream);
- if (length == 0) {
- while (!_is_voidage(ch)) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- } else {
- while (!_is_voidage(ch) && i < length) {
- tbuf[i] = ch;
- ch = _getc(stream);
- i++;
- }
- }
- /* _ungetc(ch, stream); */
- tbuf[i] = '\0';
-
- /* ch = _getc(stream); */
- if (!_is_voidage(ch))
- _ungetc(ch, stream);
-
- strcpy(va_arg(*ap, char *), tbuf);
- break;
- default:
- if (*fmt >= '0' && *fmt <= '9')
- length += *fmt - '0';
- break;
- }
- fmt++;
- }
-
- return 1;
-}
-
-
-
-int
-vfscanf(FILE * stream, const char *fmt, va_list ap)
-{
- int args = 0;
-
- while (*fmt != '\0') {
-
- if (*fmt == '%') {
-
- char formstr[20];
- int i = 0;
-
- do {
- formstr[i] = *fmt;
- fmt++;
- i++;
- } while (!
- (*fmt == 'd' || *fmt == 'i' || *fmt == 'x'
- || *fmt == 'X' || *fmt == 'p' || *fmt == 'c'
- || *fmt == 's' || *fmt == '%' || *fmt == 'O'
- || *fmt == 'o'));
- formstr[i++] = *fmt;
- formstr[i] = '\0';
- if (*fmt != '%') {
- if (_scanf(stream, formstr, &ap) <= 0)
- return args;
- else
- args++;
- }
-
- }
-
- fmt++;
-
- }
-
- return args;
-}
-
-int
-getc(FILE * stream)
-{
- return _getc(stream);
-}
-
-int
-getchar(void)
-{
- return _getc(stdin);
-}
diff --git a/libc/stdio/vsscanf.c b/libc/stdio/vsscanf.c
deleted file mode 100644
index b9603e9..0000000
--- a/libc/stdio/vsscanf.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/******************************************************************************
- * Copyright (c) 2004, 2008 IBM Corporation
- * All rights reserved.
- * This program and the accompanying materials
- * are made available under the terms of the BSD License
- * which accompanies this distribution, and is available at
- * http://www.opensource.org/licenses/bsd-license.php
- *
- * Contributors:
- * IBM Corporation - initial implementation
- *****************************************************************************/
-
-#include "stdio.h"
-#include "stdlib.h"
-#include "string.h"
-
-
-static void
-_scanf(const char **buffer, const char *fmt, va_list *ap)
-{
- int i;
- int length = 0;
-
- fmt++;
-
- while(*fmt != '\0') {
-
- char tbuf[256];
-
- switch(*fmt) {
- case 'd':
- case 'i':
- if(length == 0) length = 256;
-
- for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
- tbuf[i] = **buffer;
- *buffer += 1;
- }
- tbuf[i] = '\0';
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 10);
- break;
- case 'X':
- case 'x':
- if(length == 0) length = 256;
-
- for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
- tbuf[i] = **buffer;
- *buffer += 1;
- }
- tbuf[i] = '\0';
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 16);
- break;
- case 'O':
- case 'o':
- if(length == 0) length = 256;
-
- for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
- tbuf[i] = **buffer;
- *buffer += 1;
- }
- tbuf[i] = '\0';
-
- *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 8);
- break;
- case 'c':
- *(va_arg(*ap, char *)) = **buffer;
- *buffer += 1;
- if(length > 1)
- for(i = 1; i < length; i++)
- *buffer += 1;
- break;
- case 's':
- if(length == 0) length = 256;
-
- for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
- tbuf[i] = **buffer;
- *buffer += 1;
- }
-
- tbuf[i] = '\0';
-
- strcpy(va_arg(*ap, char *), tbuf);
- break;
- default:
- if(*fmt >= '0' && *fmt <= '9')
- length += *fmt - '0';
- break;
- }
- fmt++;
- }
-
-}
-
-
-int
-vsscanf(const char *buffer, const char *fmt, va_list ap)
-{
-
- while(*fmt != '\0') {
-
- if(*fmt == '%') {
-
- char formstr[20];
- int i=0;
-
- do {
- formstr[i] = *fmt;
- fmt++;
- i++;
- } while(!(*fmt == 'd' || *fmt == 'i' || *fmt == 'x' || *fmt == 'X'
- || *fmt == 'p' || *fmt == 'c' || *fmt == 's' || *fmt == '%'
- || *fmt == 'O' || *fmt == 'o' ));
- formstr[i++] = *fmt;
- formstr[i] = '\0';
- if(*fmt != '%') {
- while(*buffer == ' ' || *buffer == '\t' || *buffer == '\n')
- buffer++;
- _scanf(&buffer, formstr, &ap);
- }
-
- }
-
- fmt++;
-
- }
-
- return 0;
-}
-
diff --git a/libfdt/fdt_rw.c b/libfdt/fdt_rw.c
index 994037b..befe87d 100644
--- a/libfdt/fdt_rw.c
+++ b/libfdt/fdt_rw.c
@@ -425,6 +425,10 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
+ struct_size + fdt_size_dt_strings(fdt);
+ /* basic sanity check */
+ if (struct_size > bufsize)
+ return -FDT_ERR_BADSTRUCTURE;
+
if (bufsize < newsize)
return -FDT_ERR_NOSPACE;
diff --git a/libflash/blocklevel.c b/libflash/blocklevel.c
index 83823c5..9591194 100644
--- a/libflash/blocklevel.c
+++ b/libflash/blocklevel.c
@@ -58,6 +58,25 @@ static int ecc_protected(struct blocklevel_device *bl, uint32_t pos, uint32_t le
return 0;
}
+static int reacquire(struct blocklevel_device *bl)
+{
+ if (!bl->keep_alive && bl->reacquire)
+ return bl->reacquire(bl);
+ return 0;
+}
+
+static int release(struct blocklevel_device *bl)
+{
+ int rc = 0;
+ if (!bl->keep_alive && bl->release) {
+		/* This is often called on an error return path, so preserve errno */
+ int err = errno;
+ rc = bl->release(bl);
+ errno = err;
+ }
+ return rc;
+}
+
int blocklevel_read(struct blocklevel_device *bl, uint32_t pos, void *buf, uint32_t len)
{
int rc;
@@ -69,14 +88,21 @@ int blocklevel_read(struct blocklevel_device *bl, uint32_t pos, void *buf, uint3
return FLASH_ERR_PARM_ERROR;
}
+ rc = reacquire(bl);
+ if (rc)
+ return rc;
+
if (!ecc_protected(bl, pos, len)) {
- return bl->read(bl, pos, buf, len);
+ rc = bl->read(bl, pos, buf, len);
+ release(bl);
+ return rc;
}
buffer = malloc(ecc_len);
if (!buffer) {
errno = ENOMEM;
- return FLASH_ERR_MALLOC_FAILED;
+ rc = FLASH_ERR_MALLOC_FAILED;
+ goto out;
}
rc = bl->read(bl, pos, buffer, ecc_len);
@@ -89,6 +115,7 @@ int blocklevel_read(struct blocklevel_device *bl, uint32_t pos, void *buf, uint3
}
out:
+ release(bl);
free(buffer);
return rc;
}
@@ -104,14 +131,21 @@ int blocklevel_write(struct blocklevel_device *bl, uint32_t pos, const void *buf
return FLASH_ERR_PARM_ERROR;
}
+ rc = reacquire(bl);
+ if (rc)
+ return rc;
+
if (!ecc_protected(bl, pos, len)) {
- return bl->write(bl, pos, buf, len);
+ rc = bl->write(bl, pos, buf, len);
+ release(bl);
+ return rc;
}
buffer = malloc(ecc_len);
if (!buffer) {
errno = ENOMEM;
- return FLASH_ERR_MALLOC_FAILED;
+ rc = FLASH_ERR_MALLOC_FAILED;
+ goto out;
}
if (memcpy_to_ecc(buffer, buf, len)) {
@@ -119,14 +153,18 @@ int blocklevel_write(struct blocklevel_device *bl, uint32_t pos, const void *buf
rc = FLASH_ERR_ECC_INVALID;
goto out;
}
+
rc = bl->write(bl, pos, buffer, ecc_len);
+
out:
+ release(bl);
free(buffer);
return rc;
}
int blocklevel_erase(struct blocklevel_device *bl, uint32_t pos, uint32_t len)
{
+ int rc;
if (!bl || !bl->erase) {
errno = EINVAL;
return FLASH_ERR_PARM_ERROR;
@@ -139,7 +177,15 @@ int blocklevel_erase(struct blocklevel_device *bl, uint32_t pos, uint32_t len)
return FLASH_ERR_ERASE_BOUNDARY;
}
- return bl->erase(bl, pos, len);
+ rc = reacquire(bl);
+ if (rc)
+ return rc;
+
+ rc = bl->erase(bl, pos, len);
+
+ release(bl);
+
+ return rc;
}
int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint32_t *total_size,
@@ -152,6 +198,10 @@ int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint32_
return FLASH_ERR_PARM_ERROR;
}
+ rc = reacquire(bl);
+ if (rc)
+ return rc;
+
rc = bl->get_info(bl, name, total_size, erase_granule);
/* Check the validity of what we are being told */
@@ -159,6 +209,8 @@ int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint32_
fprintf(stderr, "blocklevel_get_info: WARNING: erase_granule (0x%08x) and erase_mask"
" (0x%08x) don't match\n", *erase_granule, bl->erase_mask + 1);
+ release(bl);
+
return rc;
}
@@ -231,9 +283,13 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint32_t pos, const voi
if (!erase_buf) {
errno = ENOMEM;
rc = FLASH_ERR_MALLOC_FAILED;
- goto out;
+ goto out_free;
}
+ rc = reacquire(bl);
+ if (rc)
+ goto out_free;
+
while (len > 0) {
uint32_t erase_block = pos & ~(erase_size - 1);
uint32_t block_offset = pos & (erase_size - 1);
@@ -264,6 +320,8 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint32_t pos, const voi
}
out:
+ release(bl);
+out_free:
free(write_buf_start);
free(erase_buf);
return rc;
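
The reacquire()/release() helpers above bracket every blocklevel entry point, and keep_alive short-circuits both. A minimal sketch of a backend using the hooks for something other than a file descriptor, here a purely hypothetical bus lock (struct my_flash, my_bus_lock() and my_bus_unlock() are illustrative, not part of this patch):

#include <stdbool.h>
#include <libflash/blocklevel.h>

struct my_flash {
	struct blocklevel_device bl;
	void *bus;
};

/* Hypothetical stand-ins for whatever exclusivity the medium needs. */
static int my_bus_lock(void *bus)   { (void)bus; return 0; }
static int my_bus_unlock(void *bus) { (void)bus; return 0; }

/* Called via reacquire() before each read/write/erase/get_info. */
static int my_reacquire(struct blocklevel_device *bl)
{
	struct my_flash *f = bl->priv;
	return my_bus_lock(f->bus);
}

/* Called via release() once the operation completes; errno is preserved. */
static int my_release(struct blocklevel_device *bl)
{
	struct my_flash *f = bl->priv;
	return my_bus_unlock(f->bus);
}

static void my_flash_setup(struct my_flash *f)
{
	f->bl.priv = f;
	f->bl.reacquire = &my_reacquire;
	f->bl.release = &my_release;
	f->bl.keep_alive = false;	/* true would hold the bus permanently */
}
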
diff --git a/libflash/blocklevel.h b/libflash/blocklevel.h
index e9a1978..9f4285e 100644
--- a/libflash/blocklevel.h
+++ b/libflash/blocklevel.h
@@ -17,6 +17,7 @@
#define __LIBFLASH_BLOCKLEVEL_H
#include <stdint.h>
+#include <stdbool.h>
struct bl_prot_range {
uint32_t start;
@@ -39,6 +40,8 @@ enum blocklevel_flags {
*/
struct blocklevel_device {
void *priv;
+ int (*reacquire)(struct blocklevel_device *bl);
+ int (*release)(struct blocklevel_device *bl);
int (*read)(struct blocklevel_device *bl, uint32_t pos, void *buf, uint32_t len);
int (*write)(struct blocklevel_device *bl, uint32_t pos, const void *buf, uint32_t len);
int (*erase)(struct blocklevel_device *bl, uint32_t pos, uint32_t len);
@@ -49,6 +52,7 @@ struct blocklevel_device {
* Keep the erase mask so that blocklevel_erase() can do sanity checking
*/
uint32_t erase_mask;
+ bool keep_alive;
enum blocklevel_flags flags;
struct blocklevel_range ecc_prot;
diff --git a/libflash/file.c b/libflash/file.c
index 0dbe610..72e2da9 100644
--- a/libflash/file.c
+++ b/libflash/file.c
@@ -14,11 +14,10 @@
* limitations under the License.
*/
#define _GNU_SOURCE
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -36,14 +35,35 @@
struct file_data {
int fd;
char *name;
+ char *path;
struct blocklevel_device bl;
};
+static int file_release(struct blocklevel_device *bl)
+{
+ struct file_data *file_data = container_of(bl, struct file_data, bl);
+ close(file_data->fd);
+ file_data->fd = -1;
+ return 0;
+}
+
+static int file_reacquire(struct blocklevel_device *bl)
+{
+ struct file_data *file_data = container_of(bl, struct file_data, bl);
+ int fd;
+
+ fd = open(file_data->path, O_RDWR);
+ if (fd == -1)
+ return FLASH_ERR_PARM_ERROR;
+ file_data->fd = fd;
+ return 0;
+}
+
static int file_read(struct blocklevel_device *bl, uint32_t pos, void *buf, uint32_t len)
{
struct file_data *file_data = container_of(bl, struct file_data, bl);
- int count = 0;
- int rc;
+ int rc, count = 0;
+
rc = lseek(file_data->fd, pos, SEEK_SET);
/* errno should remain set */
if (rc != pos)
@@ -54,6 +74,7 @@ static int file_read(struct blocklevel_device *bl, uint32_t pos, void *buf, uint
/* errno should remain set */
if (rc == -1)
return FLASH_ERR_BAD_READ;
+
count += rc;
}
@@ -64,12 +85,11 @@ static int file_write(struct blocklevel_device *bl, uint32_t dst, const void *sr
uint32_t len)
{
struct file_data *file_data = container_of(bl, struct file_data, bl);
- int count = 0;
- int rc;
+ int rc, count = 0;
rc = lseek(file_data->fd, dst, SEEK_SET);
/* errno should remain set */
- if (rc == -1)
+ if (rc != dst)
return FLASH_ERR_PARM_ERROR;
while (count < len) {
@@ -77,6 +97,7 @@ static int file_write(struct blocklevel_device *bl, uint32_t dst, const void *sr
/* errno should remain set */
if (rc == -1)
return FLASH_ERR_VERIFY_FAILURE;
+
count += rc;
}
@@ -94,7 +115,7 @@ static int file_erase(struct blocklevel_device *bl, uint32_t dst, uint32_t len)
{
unsigned long long int d = ULLONG_MAX;
int i = 0;
- int rc = 0;
+ int rc;
while (len - i > 0) {
rc = file_write(bl, dst + i, &d, len - i > sizeof(d) ? sizeof(d) : len - i);
@@ -114,7 +135,10 @@ static int mtd_erase(struct blocklevel_device *bl, uint32_t dst, uint32_t len)
.length = len
};
- return ioctl(file_data->fd, MEMERASE, &erase_info) == -1 ? -1 : 0;
+ if (ioctl(file_data->fd, MEMERASE, &erase_info) == -1)
+ return FLASH_ERR_PARM_ERROR;
+
+ return 0;
}
static int get_info_name(struct file_data *file_data, char **name)
@@ -152,15 +176,16 @@ static int get_info_name(struct file_data *file_data, char **name)
}
-static int mtd_get_info(struct blocklevel_device *bl, const char **name, uint32_t *total_size,
- uint32_t *erase_granule)
+static int mtd_get_info(struct blocklevel_device *bl, const char **name,
+ uint32_t *total_size, uint32_t *erase_granule)
{
struct file_data *file_data = container_of(bl, struct file_data, bl);
struct mtd_info_user mtd_info;
int rc;
- if (ioctl(file_data->fd, MEMGETINFO, &mtd_info) == -1)
- return FLASH_ERR_BAD_READ;
+ rc = ioctl(file_data->fd, MEMGETINFO, &mtd_info);
+ if (rc == -1)
+ return FLASH_ERR_BAD_READ;
if (total_size)
*total_size = mtd_info.size;
@@ -178,8 +203,8 @@ static int mtd_get_info(struct blocklevel_device *bl, const char **name, uint32_
return 0;
}
-static int file_get_info(struct blocklevel_device *bl, const char **name, uint32_t *total_size,
- uint32_t *erase_granule)
+static int file_get_info(struct blocklevel_device *bl, const char **name,
+ uint32_t *total_size, uint32_t *erase_granule)
{
struct file_data *file_data = container_of(bl, struct file_data, bl);
struct stat st;
@@ -214,17 +239,26 @@ int file_init(int fd, struct blocklevel_device **bl)
*bl = NULL;
- file_data = malloc(sizeof(struct file_data));
+ file_data = calloc(1, sizeof(struct file_data));
if (!file_data)
return FLASH_ERR_MALLOC_FAILED;
- memset(file_data, 0, sizeof(*file_data));
file_data->fd = fd;
+ file_data->bl.reacquire = &file_reacquire;
+ file_data->bl.release = &file_release;
file_data->bl.read = &file_read;
file_data->bl.write = &file_write;
file_data->bl.erase = &file_erase;
file_data->bl.get_info = &file_get_info;
file_data->bl.erase_mask = 0;
+
+ /*
+	 * If the blocklevel_device is initialised with file_init() alone,
+	 * keep_alive is assumed: releasing and reacquiring would change the
+	 * fd underneath the caller and may break its assumptions.
+ */
+ file_data->bl.keep_alive = 1;
+
/*
* Unfortunately not all file descriptors are created equal...
* Here we check to see if the file descriptor is to an MTD device, in
@@ -252,9 +286,12 @@ out:
return FLASH_ERR_PARM_ERROR;
}
-int file_init_path(const char *path, int *r_fd, struct blocklevel_device **bl)
+int file_init_path(const char *path, int *r_fd, bool keep_alive,
+ struct blocklevel_device **bl)
{
int fd, rc;
+ char *path_ptr = NULL;
+ struct file_data *file_data;
if (!path || !bl)
return FLASH_ERR_PARM_ERROR;
@@ -263,14 +300,32 @@ int file_init_path(const char *path, int *r_fd, struct blocklevel_device **bl)
if (fd == -1)
return FLASH_ERR_PARM_ERROR;
+ /*
+	 * strdup() first so we don't have to deal with an allocation failure
+	 * after file_init()
+ */
+ path_ptr = strdup(path);
+ if (!path_ptr) {
+ rc = FLASH_ERR_MALLOC_FAILED;
+ goto out;
+ }
+
rc = file_init(fd, bl);
if (rc)
- close(fd);
+ goto out;
+
+ file_data = container_of(*bl, struct file_data, bl);
+ file_data->bl.keep_alive = keep_alive;
+ file_data->path = path_ptr;
if (r_fd)
*r_fd = fd;
return rc;
+out:
+ free(path_ptr);
+ close(fd);
+ return rc;
}
void file_exit(struct blocklevel_device *bl)
@@ -280,6 +335,7 @@ void file_exit(struct blocklevel_device *bl)
free(bl->ecc_prot.prot);
file_data = container_of(bl, struct file_data, bl);
free(file_data->name);
+ free(file_data->path);
free(file_data);
}
}
diff --git a/libflash/file.h b/libflash/file.h
index a9a89fe..c8e58a6 100644
--- a/libflash/file.h
+++ b/libflash/file.h
@@ -17,6 +17,8 @@
#ifndef __LIBFLASH_FILE_H
#define __LIBFLASH_FILE_H
+#include <stdbool.h>
+
#include "blocklevel.h"
/*
@@ -34,7 +36,7 @@ void file_exit(struct blocklevel_device *bl);
* Because file_exit() doesn't close the file descriptor, file_init_path()
* makes it available.
*/
-int file_init_path(const char *path, int *fd, struct blocklevel_device **bl);
+int file_init_path(const char *path, int *fd, bool keep_alive, struct blocklevel_device **bl);
/*
* file_exit_close is a convenience wrapper which will close the open
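
Taken together with the blocklevel hooks, the new keep_alive argument lets a caller choose whether the descriptor stays open between operations. A minimal caller-side sketch, assuming the interfaces declared above (the path and read size are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <libflash/blocklevel.h>
#include <libflash/file.h>

/* Illustrative: read the first 4 KiB of a flash image through blocklevel.
 * With keep_alive == false the file backend closes its fd after each call
 * and reopens it (via reacquire) before the next one. */
static int dump_first_block(const char *path)
{
	struct blocklevel_device *bl;
	uint8_t buf[4096];
	int rc;

	rc = file_init_path(path, NULL, false, &bl);
	if (rc)
		return rc;

	rc = blocklevel_read(bl, 0, buf, sizeof(buf));
	if (!rc)
		fwrite(buf, 1, sizeof(buf), stdout);

	file_exit_close(bl);
	return rc;
}

A caller that uses the returned fd directly would pass keep_alive as true, since reacquiring replaces the descriptor underneath it.
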
diff --git a/libflash/libffs.c b/libflash/libffs.c
index 3010a61..51d9856 100644
--- a/libflash/libffs.c
+++ b/libflash/libffs.c
@@ -36,7 +36,6 @@ enum ffs_type {
struct ffs_handle {
struct ffs_hdr hdr; /* Converted header */
enum ffs_type type;
- struct flash_chip *chip;
uint32_t toc_offset;
uint32_t max_size;
void *cache;
@@ -73,7 +72,7 @@ static int ffs_check_convert_header(struct ffs_hdr *dst, struct ffs_hdr *src)
}
int ffs_init(uint32_t offset, uint32_t max_size, struct blocklevel_device *bl,
- struct ffs_handle **ffs, int mark_ecc)
+ struct ffs_handle **ffs, bool mark_ecc)
{
struct ffs_hdr hdr;
struct ffs_handle *f;
@@ -171,85 +170,6 @@ out:
return rc;
}
-/* ffs_open_image is Linux only as it uses lseek, which skiboot does not
- * implement */
-#ifndef __SKIBOOT__
-int ffs_open_image(int fd, uint32_t size, uint32_t toc_offset,
- struct ffs_handle **ffsh)
-{
- struct ffs_hdr hdr;
- struct ffs_handle *f;
- int rc;
-
- if (!ffsh)
- return FLASH_ERR_PARM_ERROR;
- *ffsh = NULL;
-
- if (fd < 0)
- return FLASH_ERR_PARM_ERROR;
-
- if ((toc_offset + size) < toc_offset)
- return FLASH_ERR_PARM_ERROR;
-
- /* Read flash header */
- rc = lseek(fd, toc_offset, SEEK_SET);
- if (rc < 0)
- return FLASH_ERR_PARM_ERROR;
-
- rc = read(fd, &hdr, sizeof(hdr));
- if (rc != sizeof(hdr))
- return FLASH_ERR_BAD_READ;
-
- /* Allocate ffs_handle structure and start populating */
- f = malloc(sizeof(*f));
- if (!f)
- return FLASH_ERR_MALLOC_FAILED;
- memset(f, 0, sizeof(*f));
- f->type = ffs_type_image;
- f->toc_offset = toc_offset;
- f->max_size = size;
- f->chip = NULL;
-
- /* Convert and check flash header */
- rc = ffs_check_convert_header(&f->hdr, &hdr);
- if (rc) {
- FL_ERR("FFS: Error %d checking flash header\n", rc);
- free(f);
- return rc;
- }
-
- /*
- * Decide how much of the image to grab to get the whole
- * partition map.
- */
- f->cached_size = f->hdr.block_size * f->hdr.size;
- FL_DBG("FFS: Partition map size: 0x%x\n", f->cached_size);
-
- /* Allocate cache */
- f->cache = malloc(f->cached_size);
- if (!f->cache) {
- free(f);
- return FLASH_ERR_MALLOC_FAILED;
- }
-
- /* Read the cached map */
- rc = lseek(fd, toc_offset, SEEK_SET);
- if (rc < 0)
- return FLASH_ERR_PARM_ERROR;
-
- rc = read(fd, f->cache, f->cached_size);
- if (rc != f->cached_size) {
- FL_ERR("FFS: Error %d reading flash partition map\n", rc);
- free(f);
- return FLASH_ERR_BAD_READ;
- }
-
- *ffsh = f;
-
- return 0;
-}
-#endif /*!__SKIBOOT__*/
-
void ffs_close(struct ffs_handle *ffs)
{
if (ffs->cache)
@@ -351,6 +271,46 @@ int ffs_part_info(struct ffs_handle *ffs, uint32_t part_idx,
return 0;
}
+/*
+ * There are quite a few ways one might consider two ffs_handles to be the
+ * same. For the purposes of this function we are trying to detect a fairly
+ * specific scenario:
+ * Consecutive calls to ffs_next_side() may succeed but have gone circular.
+ * It is possible that the OTHER_SIDE partition in one TOC actually points
+ * back to the TOC of the first ffs_handle.
+ * This function checks for this case; the requirements are therefore
+ * simple: the underlying blocklevel_devices must be the same, along with
+ * the toc_offset and the max_size.
+ */
+bool ffs_equal(struct ffs_handle *one, struct ffs_handle *two)
+{
+ return (!one && !two) || (one && two && one->bl == two->bl
+ && one->toc_offset == two->toc_offset
+ && one->max_size == two->max_size);
+}
+
+int ffs_next_side(struct ffs_handle *ffs, struct ffs_handle **new_ffs,
+ bool mark_ecc)
+{
+ int rc;
+ uint32_t index, offset, max_size;
+
+ if (!ffs || !new_ffs)
+ return FLASH_ERR_PARM_ERROR;
+
+ *new_ffs = NULL;
+
+ rc = ffs_lookup_part(ffs, "OTHER_SIDE", &index);
+ if (rc)
+ return rc;
+
+ rc = ffs_part_info(ffs, index, NULL, &offset, &max_size, NULL, NULL);
+ if (rc)
+ return rc;
+
+ return ffs_init(offset, max_size, ffs->bl, new_ffs, mark_ecc);
+}
+
int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx,
uint32_t act_size)
{
@@ -381,8 +341,6 @@ int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx,
}
ent->actual = cpu_to_be32(act_size);
ent->checksum = ffs_checksum(ent, FFS_ENTRY_SIZE_CSUM);
- if (!ffs->chip)
- return 0;
return blocklevel_write(ffs->bl, offset, ent, FFS_ENTRY_SIZE);
}
diff --git a/libflash/libffs.h b/libflash/libffs.h
index ddf3793..a9ff574 100644
--- a/libflash/libffs.h
+++ b/libflash/libffs.h
@@ -37,15 +37,28 @@ struct ffs_handle;
/* Init */
-int ffs_init(uint32_t offset, uint32_t max_size,
- struct blocklevel_device *bl, struct ffs_handle **ffs, int mark_ecc);
-
-/* ffs_open_image is Linux only as it uses lseek, which skiboot does not
- * implement */
-#ifndef __SKIBOOT__
-int ffs_open_image(int fd, uint32_t size, uint32_t toc_offset,
- struct ffs_handle **ffs);
-#endif
+int ffs_init(uint32_t offset, uint32_t max_size, struct blocklevel_device *bl,
+ struct ffs_handle **ffs, bool mark_ecc);
+
+/*
+ * Initialise a new ffs_handle for the TOC that the "OTHER_SIDE" partition points to.
+ * Reuses the underlying blocklevel_device.
+ */
+int ffs_next_side(struct ffs_handle *ffs, struct ffs_handle **new_ffs,
+ bool mark_ecc);
+
+/*
+ * There are quite a few ways one might consider two ffs_handles to be the
+ * same. For the purposes of this function we are trying to detect a fairly
+ * specific scenario:
+ * Consecutive calls to ffs_next_side() may succeed but have gone circular.
+ * It is possible that the OTHER_SIDE partition in one TOC actually points
+ * back to the TOC of the first ffs_handle.
+ * This function checks for this case; the requirements are therefore
+ * simple: the underlying blocklevel_devices must be the same, along with
+ * the toc_offset and the max_size.
+ */
+bool ffs_equal(struct ffs_handle *one, struct ffs_handle *two);
void ffs_close(struct ffs_handle *ffs);
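
A short sketch of how a consumer might walk the TOC sides with the new helpers, using ffs_equal() to stop once OTHER_SIDE loops back to the starting TOC (error handling and the inspection step are illustrative):

#include <stdbool.h>
#include <stdint.h>

#include <libflash/libffs.h>

/* Illustrative: visit each flash side at most once. */
static int for_each_side(struct blocklevel_device *bl, uint32_t toc_offset,
			 uint32_t max_size)
{
	struct ffs_handle *first, *cur, *next;
	int rc;

	rc = ffs_init(toc_offset, max_size, bl, &first, true);
	if (rc)
		return rc;

	cur = first;
	do {
		/* ... inspect 'cur' with ffs_part_info() here ... */

		rc = ffs_next_side(cur, &next, true);
		if (rc || !next)
			break;

		if (cur != first)
			ffs_close(cur);
		cur = next;
	} while (!ffs_equal(first, cur));

	if (cur != first)
		ffs_close(cur);
	ffs_close(first);
	return rc;
}

Without the ffs_equal() check, a two-sided image whose OTHER_SIDE entries point at each other would make such a caller loop forever.
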
diff --git a/libflash/libflash.c b/libflash/libflash.c
index c05c927..50dc54d 100644
--- a/libflash/libflash.c
+++ b/libflash/libflash.c
@@ -34,6 +34,9 @@ static const struct flash_info flash_info[] = {
{ 0x20ba20, 0x04000000, FL_ERASE_4K | FL_ERASE_64K | FL_CAN_4B |
FL_ERASE_BULK | FL_MICRON_BUGS,
"Micron N25Qx512Ax" },
+ { 0x20ba19, 0x02000000, FL_ERASE_4K | FL_ERASE_64K | FL_CAN_4B |
+ FL_ERASE_BULK | FL_MICRON_BUGS,
+ "Micron N25Q256Ax" },
{ 0x4d5444, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "File Abstraction"},
{ 0x55aa55, 0x00100000, FL_ERASE_ALL | FL_CAN_4B, "TEST_FLASH" },
{ 0xaa55aa, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "EMULATED_FLASH"},
@@ -749,8 +752,7 @@ static int flash_configure(struct flash_chip *c)
FL_DBG("LIBFLASH: Enabling controller 4B mode...\n");
rc = ct->set_4b(ct, true);
if (rc) {
- FL_ERR("LIBFLASH: Failed"
- " to set controller 4b mode\n");
+ FL_ERR("LIBFLASH: Failed to set controller 4b mode\n");
return rc;
}
}
@@ -833,6 +835,10 @@ bail:
return rc;
}
+	/* The flash backend doesn't support being re-initialised */
+ c->bl.keep_alive = true;
+ c->bl.reacquire = NULL;
+ c->bl.release = NULL;
c->bl.read = &flash_read;
c->bl.write = &flash_smart_write;
c->bl.erase = &flash_erase;
diff --git a/libpore/sbe_xip_image.c b/libpore/sbe_xip_image.c
index 800892c..7ee4886 100644
--- a/libpore/sbe_xip_image.c
+++ b/libpore/sbe_xip_image.c
@@ -1070,8 +1070,11 @@ xipDecodeToc(void* i_image,
o_item->iv_toc = i_imageToc;
o_item->iv_type = hostToc.iv_type;
o_item->iv_elements = hostToc.iv_elements;
-
- sbe_xip_get_section(i_image, SBE_XIP_SECTION_STRINGS, &stringsSection);
+
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_STRINGS,
+ &stringsSection);
+ if (rc) break;
+
o_item->iv_id =
(char*)i_image + stringsSection.iv_offset + hostToc.iv_id;
diff --git a/opal-ci/build-qemu-powernv.sh b/opal-ci/build-qemu-powernv.sh
new file mode 100755
index 0000000..5d2536e
--- /dev/null
+++ b/opal-ci/build-qemu-powernv.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+
+git clone --depth=1 -b qemu-powernv-for-skiboot-2 git://github.com/open-power/qemu.git
+cd qemu
+git submodule update --init dtc
+./configure --target-list=ppc64-softmmu --disable-werror
+make -j `grep -c processor /proc/cpuinfo`
diff --git a/opal-ci/fetch-debian-jessie-installer.sh b/opal-ci/fetch-debian-jessie-installer.sh
new file mode 100755
index 0000000..6140ebd
--- /dev/null
+++ b/opal-ci/fetch-debian-jessie-installer.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://ftp.debian.org/debian/dists/jessie/main/installer-ppc64el/current/images/netboot/debian-installer/ppc64el/vmlinux -O debian-jessie-vmlinux
+wget http://ftp.debian.org/debian/dists/jessie/main/installer-ppc64el/current/images/netboot/debian-installer/ppc64el/initrd.gz -O debian-jessie-initrd.gz
diff --git a/opal-ci/install-deps-qemu-powernv.sh b/opal-ci/install-deps-qemu-powernv.sh
new file mode 100755
index 0000000..a67daf4
--- /dev/null
+++ b/opal-ci/install-deps-qemu-powernv.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -e
+sudo apt-get -y install eatmydata
+sudo eatmydata apt-get -y install gcc python g++ pkg-config \
+libz-dev libglib2.0-dev libpixman-1-dev libfdt-dev git
diff --git a/platforms/astbmc/garrison.c b/platforms/astbmc/garrison.c
index edc3522..57e41d7 100644
--- a/platforms/astbmc/garrison.c
+++ b/platforms/astbmc/garrison.c
@@ -21,9 +21,67 @@
#include <chip.h>
#include <ipmi.h>
#include <psi.h>
+#include <npu-regs.h>
#include "astbmc.h"
+#define NPU_BASE 0x8013c00
+#define NPU_SIZE 0x2c
+#define NPU_INDIRECT0 0x8000000008010c3f
+#define NPU_INDIRECT1 0x8000000008010c7f
+
+static void create_link(struct dt_node *npu, struct dt_node *pbcq, int index)
+{
+ struct dt_node *link;
+ uint32_t lane_mask;
+ uint64_t phy;
+ char namebuf[32];
+
+ snprintf(namebuf, sizeof(namebuf), "link@%x", index);
+ link = dt_new(npu, namebuf);
+
+ dt_add_property_string(link, "compatible", "ibm,npu-link");
+ dt_add_property_cells(link, "ibm,npu-link-index", index);
+
+ if (index < 4) {
+ phy = NPU_INDIRECT0;
+ lane_mask = 0xff << (index * 8);
+ } else {
+ phy = NPU_INDIRECT1;
+ lane_mask = 0xff0000 >> (index - 3) * 8;
+ }
+ dt_add_property_u64s(link, "ibm,npu-phy", phy);
+ dt_add_property_cells(link, "ibm,npu-lane-mask", lane_mask);
+ dt_add_property_cells(link, "ibm,npu-pbcq", pbcq->phandle);
+}
+
+static void dt_create_npu(void)
+{
+ struct dt_node *xscom, *npu, *pbcq;
+ char namebuf[32];
+
+ dt_for_each_compatible(dt_root, xscom, "ibm,xscom") {
+ snprintf(namebuf, sizeof(namebuf), "npu@%x", NPU_BASE);
+ npu = dt_new(xscom, namebuf);
+ dt_add_property_cells(npu, "reg", NPU_BASE, NPU_SIZE);
+ dt_add_property_strings(npu, "compatible", "ibm,power8-npu");
+ dt_add_property_cells(npu, "ibm,npu-index", 0);
+ dt_add_property_cells(npu, "ibm,npu-links", 4);
+
+ /* On Garrison we have 2 links per GPU device. The
+ * first 2 links go to the GPU connected via
+		 * pbcq@2012c00, the second two via pbcq@2012800. */
+ pbcq = dt_find_by_name(xscom, "pbcq@2012c00");
+ assert(pbcq);
+ create_link(npu, pbcq, 0);
+ create_link(npu, pbcq, 1);
+ pbcq = dt_find_by_name(xscom, "pbcq@2012800");
+ assert(pbcq);
+ create_link(npu, pbcq, 4);
+ create_link(npu, pbcq, 5);
+ }
+}
+
static bool garrison_probe(void)
{
if (!dt_node_is_compatible(dt_root, "ibm,garrison"))
@@ -43,6 +101,9 @@ static bool garrison_probe(void)
*/
psi_set_external_irq_policy(EXTERNAL_IRQ_POLICY_LINUX);
+	/* Fixups until hostboot (HB) gets the NPU bindings */
+ dt_create_npu();
+
return true;
}
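
Working the lane-mask arithmetic in create_link() through the four links Garrison actually creates: index 0 selects NPU_INDIRECT0 with lane_mask 0xff << 0 = 0x0000ff and index 1 gives 0xff << 8 = 0x00ff00, while index 4 selects NPU_INDIRECT1 with lane_mask 0xff0000 >> 8 = 0x00ff00 and index 5 gives 0xff0000 >> 16 = 0x0000ff, so the two links on each PHY use mirrored lane ranges.
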
diff --git a/skiboot.spec b/skiboot.spec
new file mode 100644
index 0000000..bc48695
--- /dev/null
+++ b/skiboot.spec
@@ -0,0 +1,115 @@
+Name: opal-prd
+Version: 5.1.12
+Release: 1%{?dist}
+Summary: OPAL Processor Recovery Diagnostics Daemon
+
+Group: System Environment/Daemons
+License: ASL 2.0
+URL: http://github.com/open-power/skiboot
+ExclusiveArch: ppc64le
+
+BuildRequires: systemd
+
+Requires: systemd
+
+Source0: https://github.com/open-power/skiboot/archive/skiboot-%{version}.tar.gz
+Source1: opal-prd.socket
+Source2: opal-prd.service
+
+%description
+This package provides a daemon to load and run the OpenPower firmware's
+Processor Recovery Diagnostics binary, which is responsible for run-time
+maintenance of OpenPower systems hardware.
+
+
+%package -n opal-utils
+Summary: OPAL firmware utilities
+Group: Applications/System
+
+%description -n opal-utils
+This package contains utility programs.
+
+The 'gard' utility can read, parse and clear hardware gard partitions
+on OpenPower platforms. The 'getscom' and 'putscom' utilities provide
+an interface to query or modify the registers of the different chipsets
+of an OpenPower system. 'pflash' is a tool to access the flash modules
+on such systems and update the OpenPower firmware.
+
+%package -n opal-firmware
+Summary: OPAL firmware
+BuildArch: noarch
+
+%description -n opal-firmware
+OPAL firmware, aka skiboot, loads the bootloader and provides runtime
+services to the OS (Linux) on IBM Power and OpenPower systems.
+
+
+%prep
+
+%setup -q -n skiboot-skiboot-%version
+
+%build
+SKIBOOT_VERSION=%version CROSS= make V=1 %{?_smp_mflags}
+OPAL_PRD_VERSION=%version make %{?_smp_mflags} V=1 -C external/opal-prd
+GARD_VERSION=%version make V=1 %{?_smp_mflags} -C external/gard
+PFLASH_VERSION=%version make V=1 %{?_smp_mflags} -C external/pflash
+make V=1 %{?_smp_mflags} -C external/xscom-utils
+
+%install
+make -C external/opal-prd install DESTDIR=%{buildroot} prefix=/usr
+make -C external/gard install DESTDIR=%{buildroot} prefix=/usr
+cp external/xscom-utils/{get,put}scom %{buildroot}%{_sbindir}
+cp external/pflash/pflash %{buildroot}%{_sbindir}
+
+mkdir -p %{buildroot}%{_unitdir}
+install -m 644 -p %{SOURCE1} %{buildroot}%{_unitdir}/opal-prd.socket
+install -m 644 -p %{SOURCE2} %{buildroot}%{_unitdir}/opal-prd.service
+
+mkdir -p %{buildroot}%{_datadir}/qemu
+install -m 644 -p skiboot.lid %{buildroot}%{_datadir}/qemu/skiboot.lid
+
+%post
+if [ $1 -eq 1 ] ; then
+ # Initial installation
+ /bin/systemctl enable opal-prd.service >/dev/null 2>&1 || :
+ /bin/systemctl start opal-prd.service >/dev/null 2>&1 || :
+fi
+
+%preun
+if [ $1 -eq 0 ] ; then
+ # Package removal, not upgrade
+ /bin/systemctl --no-reload disable opal-prd.service > /dev/null 2>&1 || :
+ /bin/systemctl stop opal-prd.service > /dev/null 2>&1 || :
+fi
+
+%postun
+systemctl daemon-reload >/dev/null 2>&1 || :
+if [ "$1" -ge 1 ] ; then
+ /bin/systemctl try-restart opal-prd.service >/dev/null 2>&1 || :
+fi
+
+%files
+%doc README
+%license LICENCE
+%{_sbindir}/opal-prd
+%{_unitdir}/opal-prd.socket
+%{_unitdir}/opal-prd.service
+%{_mandir}/man8/*
+
+%files -n opal-utils
+%doc README
+%license LICENCE
+%{_sbindir}/opal-gard
+%{_sbindir}/getscom
+%{_sbindir}/putscom
+%{_sbindir}/pflash
+%{_mandir}/man1/*
+
+%files -n opal-firmware
+%doc README
+%license LICENCE
+%{_datadir}/qemu/
+
+%changelog
+* Mon Nov 23 2015 Vasant Hegde <hegdevasant@linux.vnet.ibm.com> - 5.1.12
+- initial upstream spec file
diff --git a/test/Makefile.check b/test/Makefile.check
index de23bfb..8dc540a 100644
--- a/test/Makefile.check
+++ b/test/Makefile.check
@@ -1,12 +1,18 @@
-check: boot-check
+check: boot-check qemu-boot-check debian-jessie-boot-check
boot-check: skiboot.lid
- ./test/run_boot_test.sh
+ ./test/run_mambo_boot_test.sh
+
+qemu-boot-check: skiboot.lid
+ ./test/run_qemu_boot_test.sh
+
+debian-jessie-boot-check: skiboot.lid
+ ./test/run_qemu-jessie-debian-installer_boot_test.sh
OP_BUILD_BOOT_CHECK=op-build-v1.0 op-build-v1.1 op-build-v1.2 op-build-v1.2.1
boot-check-%: skiboot.lid skiboot.map
- SKIBOOT_MEM_DUMP=skiboot-$(@:boot-check-%=%).dump SKIBOOT_ZIMAGE=`pwd`/opal-ci/images/$(@:boot-check-%=%)/zImage.epapr ./test/run_boot_test.sh
+ SKIBOOT_MEM_DUMP=skiboot-$(@:boot-check-%=%).dump SKIBOOT_ZIMAGE=`pwd`/opal-ci/images/$(@:boot-check-%=%)/zImage.epapr ./test/run_mambo_boot_test.sh
boot-tests: boot-check $(OP_BUILD_BOOT_CHECK:%=boot-check-%)
diff --git a/test/hello_world/Makefile.check b/test/hello_world/Makefile.check
index 37b0282..7a1889e 100644
--- a/test/hello_world/Makefile.check
+++ b/test/hello_world/Makefile.check
@@ -1,11 +1,14 @@
HELLO_WORLD_TEST := test/hello_world/hello_kernel/hello_kernel
-check: $(HELLO_WORLD_TEST:%=%-check)
+check: $(HELLO_WORLD_TEST:%=%-check-mambo) $(HELLO_WORLD_TEST:%=%-check-qemu)
-boot-tests: $(HELLO_WORLD_TEST:%=%-check)
+boot-tests: $(HELLO_WORLD_TEST:%=%-check-mambo) $(HELLO_WORLD_TEST:%=%-check-qemu)
-$(HELLO_WORLD_TEST:%=%-check) : %-check: % skiboot.lid
- ./test/hello_world/run_hello_world.sh
+$(HELLO_WORLD_TEST:%=%-check-mambo) : %-check-mambo: % skiboot.lid
+ ./test/hello_world/run_mambo_hello_world.sh
+
+$(HELLO_WORLD_TEST:%=%-check-qemu) : %-check-qemu: % skiboot.lid
+ ./test/hello_world/run_qemu_hello_world.sh
test/hello_world/hello_kernel/hello_kernel.o: test/hello_world/hello_kernel/hello_kernel.S test/hello_world/hello_kernel/hello_kernel.ld
$(call Q,CC, $(CC) -m64 -c -MMD -o $@ $< ,$@)
diff --git a/test/hello_world/run_hello_world.sh b/test/hello_world/run_mambo_hello_world.sh
index e6f4d86..e6f4d86 100755
--- a/test/hello_world/run_hello_world.sh
+++ b/test/hello_world/run_mambo_hello_world.sh
diff --git a/test/hello_world/run_qemu_hello_world.sh b/test/hello_world/run_qemu_hello_world.sh
new file mode 100755
index 0000000..3292178
--- /dev/null
+++ b/test/hello_world/run_qemu_hello_world.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+
+if [ -z "$QEMU_PATH" ]; then
+ QEMU_PATH=`pwd`/opal-ci/qemu/ppc64-softmmu/
+fi
+
+if [ -z "$QEMU_BINARY" ]; then
+ QEMU_BINARY="qemu-system-ppc64"
+fi
+
+if [ ! -x "$QEMU_PATH/$QEMU_BINARY" ]; then
+ echo 'Could not find executable QEMU_BINARY. Skipping hello_world test';
+ exit 0;
+fi
+
+if [ -n "$KERNEL" ]; then
+ echo 'Please rebuild skiboot without KERNEL set. Skipping hello_world test';
+ exit 0;
+fi
+
+if [ ! `command -v expect` ]; then
+ echo 'Could not find expect binary. Skipping hello_world test';
+ exit 0;
+fi
+
+
+export SKIBOOT_ZIMAGE=`pwd`/test/hello_world/hello_kernel/hello_kernel
+
+
+(
+cat <<EOF | expect
+set timeout 30
+spawn $QEMU_PATH/$QEMU_BINARY -m 1G -M powernv -kernel $SKIBOOT_ZIMAGE -nographic
+expect {
+timeout { send_user "\nTimeout waiting for hello world\n"; exit 1 }
+eof { send_user "\nUnexpected EOF\n"; exit 1 }
+"Hello World!"
+}
+close
+wait
+exit 0
+EOF
+)
+exit 0;
diff --git a/test/run_boot_test.sh b/test/run_mambo_boot_test.sh
index 8ae7fda..8ae7fda 100755
--- a/test/run_boot_test.sh
+++ b/test/run_mambo_boot_test.sh
diff --git a/test/run_qemu-jessie-debian-installer_boot_test.sh b/test/run_qemu-jessie-debian-installer_boot_test.sh
new file mode 100755
index 0000000..7609125
--- /dev/null
+++ b/test/run_qemu-jessie-debian-installer_boot_test.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+
+if [ -z "$QEMU_PATH" ]; then
+ QEMU_PATH=`pwd`/opal-ci/qemu/ppc64-softmmu/
+fi
+
+if [ -z "$QEMU_BINARY" ]; then
+ QEMU_BINARY="qemu-system-ppc64"
+fi
+
+if [ ! -x "$QEMU_PATH/$QEMU_BINARY" ]; then
+	echo 'Could not find executable QEMU_BINARY. Skipping boot test';
+ exit 0;
+fi
+
+if [ -n "$KERNEL" ]; then
+ echo 'Please rebuild skiboot without KERNEL set. Skipping boot test';
+ exit 0;
+fi
+
+if [ ! `command -v expect` ]; then
+ echo 'Could not find expect binary. Skipping boot test';
+ exit 0;
+fi
+
+if [ ! -f debian-jessie-vmlinux ]; then
+ echo 'No debian-jessie-vmlinux kernel! Run opal-ci/fetch-debian-jessie-installer.sh : Skipping test.';
+ exit 0;
+fi
+
+if [ ! -f debian-jessie-initrd.gz ]; then
+ echo 'No debian-jessie-initrd.gz! Run opal-ci/fetch-debian-jessie-installer.sh : Skipping test';
+ exit 0;
+fi
+
+T=`mktemp --tmpdir skiboot_qemu_debian-jessie-boot_test.XXXXXXXXXX`
+D=`mktemp --tmpdir debian-jessie-install.qcow2.XXXXXXXXXX`
+
+# In future we should do full install:
+# FIXME: -append "DEBIAN_FRONTEND=text locale=en_US keymap=us hostname=OPALtest domain=unassigned-domain rescue/enable=true"
+
+$QEMU_PATH/../qemu-img create -f qcow2 $D 128G > $T 2>&1
+
+( cat <<EOF | expect
+set timeout 600
+spawn $QEMU_PATH/$QEMU_BINARY -m 2G -M powernv -kernel debian-jessie-vmlinux -initrd debian-jessie-initrd.gz -nographic -hda $D
+expect {
+timeout { send_user "\nTimeout waiting for petitboot\n"; exit 1 }
+eof { send_user "\nUnexpected EOF\n"; exit 1 }
+"Starting system log daemon"
+}
+close
+wait
+exit 0
+EOF
+) >> $T 2>&1
+E=$?
+
+if [ $E -eq 0 ]; then
+ rm $T $D
+else
+ echo "Boot Test FAILED. Results in $T, Disk $D";
+fi
+
+echo
+exit $E;
diff --git a/test/run_qemu_boot_test.sh b/test/run_qemu_boot_test.sh
new file mode 100755
index 0000000..4809810
--- /dev/null
+++ b/test/run_qemu_boot_test.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+
+if [ -z "$QEMU_PATH" ]; then
+ QEMU_PATH=`pwd`/opal-ci/qemu/ppc64-softmmu/
+fi
+
+if [ -z "$QEMU_BINARY" ]; then
+ QEMU_BINARY="qemu-system-ppc64"
+fi
+
+if [ ! -x "$QEMU_PATH/$QEMU_BINARY" ]; then
+	echo 'Could not find executable QEMU_BINARY. Skipping boot test';
+ exit 0;
+fi
+
+if [ -n "$KERNEL" ]; then
+	echo 'Please rebuild skiboot without KERNEL set. Skipping boot test';
+ exit 0;
+fi
+
+if [ ! `command -v expect` ]; then
+	echo 'Could not find expect binary. Skipping boot test';
+ exit 0;
+fi
+
+if [ -z "$SKIBOOT_ZIMAGE" ]; then
+ export SKIBOOT_ZIMAGE=`pwd`/zImage.epapr
+fi
+
+if [ ! -f "$SKIBOOT_ZIMAGE" ]; then
+ echo "No $SKIBOOT_ZIMAGE, skipping boot test";
+ exit 0;
+fi
+
+T=`mktemp --tmpdir skiboot_qemu_boot_test.XXXXXXXXXX`
+
+( cat <<EOF | expect
+set timeout 600
+spawn $QEMU_PATH/$QEMU_BINARY -m 1G -M powernv -kernel $SKIBOOT_ZIMAGE -nographic
+expect {
+timeout { send_user "\nTimeout waiting for petitboot\n"; exit 1 }
+eof { send_user "\nUnexpected EOF\n"; exit 1 }
+"Welcome to Petitboot"
+}
+close
+wait
+exit 0
+EOF
+) > $T 2>&1
+E=$?
+
+if [ $E -eq 0 ]; then
+ rm $T
+else
+ echo "Boot Test FAILED. Results in $T";
+fi
+
+echo
+exit $E;