-rw-r--r--  .gitignore  27
-rw-r--r--  LICENCE  202
-rw-r--r--  Makefile  39
-rw-r--r--  Makefile.main  154
-rw-r--r--  Makefile.rules  53
-rw-r--r--  asm/Makefile.inc  11
-rw-r--r--  asm/asm-offsets.c  85
-rw-r--r--  asm/head.S  996
-rw-r--r--  asm/kernel-wrapper.S  26
-rw-r--r--  asm/lock.S  43
-rw-r--r--  asm/misc.S  43
-rw-r--r--  ccan/Makefile.inc  8
-rw-r--r--  ccan/array_size/LICENSE  28
-rw-r--r--  ccan/array_size/_info  46
-rw-r--r--  ccan/array_size/array_size.h  26
-rw-r--r--  ccan/array_size/test/compile_fail-function-param.c  24
-rw-r--r--  ccan/array_size/test/compile_fail.c  14
-rw-r--r--  ccan/array_size/test/run.c  33
-rw-r--r--  ccan/build_assert/LICENSE  28
-rw-r--r--  ccan/build_assert/_info  49
-rw-r--r--  ccan/build_assert/build_assert.h  40
-rw-r--r--  ccan/build_assert/test/compile_fail-expr.c  10
-rw-r--r--  ccan/build_assert/test/compile_fail.c  9
-rw-r--r--  ccan/build_assert/test/compile_ok.c  7
-rw-r--r--  ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c  9
-rw-r--r--  ccan/check_type/LICENSE  28
-rw-r--r--  ccan/check_type/_info  33
-rw-r--r--  ccan/check_type/check_type.h  64
-rw-r--r--  ccan/check_type/test/compile_fail-check_type.c  9
-rw-r--r--  ccan/check_type/test/compile_fail-check_type_unsigned.c  14
-rw-r--r--  ccan/check_type/test/compile_fail-check_types_match.c  10
-rw-r--r--  ccan/check_type/test/run.c  22
-rw-r--r--  ccan/container_of/LICENSE  28
-rw-r--r--  ccan/container_of/_info  63
-rw-r--r--  ccan/container_of/container_of.h  109
-rw-r--r--  ccan/container_of/test/compile_fail-bad-type.c  22
-rw-r--r--  ccan/container_of/test/compile_fail-types.c  22
-rw-r--r--  ccan/container_of/test/compile_fail-var-types.c  25
-rw-r--r--  ccan/container_of/test/run.c  24
-rw-r--r--  ccan/endian/LICENSE  28
-rw-r--r--  ccan/endian/_info  53
-rw-r--r--  ccan/endian/endian.h  314
-rw-r--r--  ccan/endian/test/compile_ok-constant.c  12
-rw-r--r--  ccan/endian/test/run.c  106
-rw-r--r--  ccan/list/LICENSE  17
-rw-r--r--  ccan/list/_info  70
-rw-r--r--  ccan/list/list.c  43
-rw-r--r--  ccan/list/list.h  492
-rw-r--r--  ccan/list/test/compile_ok-constant.c  49
-rw-r--r--  ccan/list/test/helper.c  56
-rw-r--r--  ccan/list/test/helper.h  9
-rw-r--r--  ccan/list/test/run-check-corrupt.c  89
-rw-r--r--  ccan/list/test/run-list_del_from-assert.c  36
-rw-r--r--  ccan/list/test/run-single-eval.c  168
-rw-r--r--  ccan/list/test/run-with-debug.c  3
-rw-r--r--  ccan/list/test/run.c  200
-rw-r--r--  ccan/short_types/LICENSE  28
-rw-r--r--  ccan/short_types/_info  81
-rw-r--r--  ccan/short_types/short_types.h  32
-rw-r--r--  ccan/short_types/test/run.c  38
-rw-r--r--  ccan/str/LICENSE  28
-rw-r--r--  ccan/str/_info  52
-rw-r--r--  ccan/str/str.c  13
-rw-r--r--  ccan/str/str.h  120
-rw-r--r--  ccan/str/test/compile_fail-STR_MAX_CHARS.c  23
-rw-r--r--  ccan/str/test/compile_fail-isalnum.c  22
-rw-r--r--  ccan/str/test/compile_fail-isalpha.c  22
-rw-r--r--  ccan/str/test/compile_fail-isascii.c  22
-rw-r--r--  ccan/str/test/compile_fail-isblank.c  26
-rw-r--r--  ccan/str/test/compile_fail-iscntrl.c  22
-rw-r--r--  ccan/str/test/compile_fail-isdigit.c  22
-rw-r--r--  ccan/str/test/compile_fail-islower.c  22
-rw-r--r--  ccan/str/test/compile_fail-isprint.c  22
-rw-r--r--  ccan/str/test/compile_fail-ispunct.c  22
-rw-r--r--  ccan/str/test/compile_fail-isspace.c  22
-rw-r--r--  ccan/str/test/compile_fail-isupper.c  22
-rw-r--r--  ccan/str/test/compile_fail-isxdigit.c  22
-rw-r--r--  ccan/str/test/compile_fail-strchr.c  18
-rw-r--r--  ccan/str/test/compile_fail-strrchr.c  18
-rw-r--r--  ccan/str/test/compile_fail-strstr.c  18
-rw-r--r--  ccan/str/test/debug.c  5
-rw-r--r--  ccan/str/test/run-STR_MAX_CHARS.c  59
-rw-r--r--  ccan/str/test/run.c  106
-rw-r--r--  core/Makefile.inc  12
-rw-r--r--  core/affinity.c  132
-rw-r--r--  core/backtrace.c  41
-rw-r--r--  core/chip.c  85
-rw-r--r--  core/console.c  334
-rw-r--r--  core/cpu.c  672
-rw-r--r--  core/device.c  791
-rw-r--r--  core/exceptions.c  529
-rw-r--r--  core/fast-reboot.c  346
-rw-r--r--  core/fdt.c  208
-rw-r--r--  core/flash-nvram.c  76
-rw-r--r--  core/hostservices.c  826
-rw-r--r--  core/init.c  687
-rw-r--r--  core/interrupts.c  332
-rw-r--r--  core/lock.c  125
-rw-r--r--  core/malloc.c  84
-rw-r--r--  core/mem_region.c  956
-rw-r--r--  core/nvram.c  248
-rw-r--r--  core/opal-msg.c  167
-rw-r--r--  core/opal.c  308
-rw-r--r--  core/pci-opal.c  666
-rw-r--r--  core/pci.c  1388
-rw-r--r--  core/platform.c  78
-rw-r--r--  core/relocate.c  65
-rw-r--r--  core/test/Makefile.check  29
-rw-r--r--  core/test/run-device.c  118
-rw-r--r--  core/test/run-malloc-speed.c  94
-rw-r--r--  core/test/run-malloc.c  144
-rw-r--r--  core/test/run-mem_region.c  250
-rw-r--r--  core/test/run-mem_region_init.c  179
-rw-r--r--  core/test/run-mem_region_release_unused.c  177
-rw-r--r--  core/test/run-mem_region_release_unused_noalloc.c  159
-rw-r--r--  core/test/run-msg.c  256
-rw-r--r--  core/test/run-trace.c  386
-rw-r--r--  core/test/stubs.c  43
-rw-r--r--  core/timebase.c  67
-rw-r--r--  core/trace.c  244
-rw-r--r--  core/utils.c  59
-rw-r--r--  core/vpd.c  211
-rw-r--r--  doc/device-tree.txt  516
-rw-r--r--  doc/error-logging.txt  384
-rw-r--r--  doc/overview.txt  116
-rw-r--r--  doc/pci-slot-properties.txt  17
-rw-r--r--  doc/vpd-properties.txt  19
-rw-r--r--  doc/xscom-node-bindings.txt  57
-rw-r--r--  external/Makefile  7
-rw-r--r--  external/dump_trace.c  197
-rw-r--r--  external/trace.c  105
-rw-r--r--  external/trace.h  20
-rw-r--r--  hdata/Makefile.inc  8
-rw-r--r--  hdata/cpu-common.c  280
-rw-r--r--  hdata/fsp.c  200
-rw-r--r--  hdata/hdata.h  49
-rw-r--r--  hdata/hdif.c  140
-rw-r--r--  hdata/hdif.h  141
-rw-r--r--  hdata/hostservices.c  96
-rw-r--r--  hdata/iohub.c  715
-rw-r--r--  hdata/memory.c  377
-rw-r--r--  hdata/paca.c  322
-rw-r--r--  hdata/pcia.c  242
-rw-r--r--  hdata/slca.c  89
-rw-r--r--  hdata/spira.c  965
-rw-r--r--  hdata/spira.h  864
-rw-r--r--  hdata/test/Makefile.check  18
-rw-r--r--  hdata/test/hdata_to_dt.c  215
-rw-r--r--  hdata/test/stubs.c  47
-rw-r--r--  hdata/vpd-common.c  38
-rw-r--r--  hdata/vpd.c  851
-rw-r--r--  hw/Makefile.inc  15
-rw-r--r--  hw/ast-bmc/Makefile.inc  5
-rw-r--r--  hw/ast-bmc/ast-io.c  324
-rw-r--r--  hw/ast-bmc/ast-sf-ctrl.c  412
-rw-r--r--  hw/cec.c  84
-rw-r--r--  hw/centaur.c  326
-rw-r--r--  hw/chiptod.c  685
-rw-r--r--  hw/ec/Makefile.inc  8
-rw-r--r--  hw/ec/gpio.c  87
-rw-r--r--  hw/ec/makefile  8
-rw-r--r--  hw/fsi-master.c  297
-rw-r--r--  hw/fsp/Makefile.inc  9
-rw-r--r--  hw/fsp/fsp-codeupdate.c  1197
-rw-r--r--  hw/fsp/fsp-console.c  922
-rw-r--r--  hw/fsp/fsp-diag.c  58
-rw-r--r--  hw/fsp/fsp-dump.c  917
-rw-r--r--  hw/fsp/fsp-elog-read.c  520
-rw-r--r--  hw/fsp/fsp-elog-write.c  643
-rw-r--r--  hw/fsp/fsp-leds.c  1080
-rw-r--r--  hw/fsp/fsp-mdst-table.c  252
-rw-r--r--  hw/fsp/fsp-mem-err.c  415
-rw-r--r--  hw/fsp/fsp-nvram.c  414
-rw-r--r--  hw/fsp/fsp-op-panel.c  249
-rw-r--r--  hw/fsp/fsp-rtc.c  572
-rw-r--r--  hw/fsp/fsp-sensor.c  788
-rw-r--r--  hw/fsp/fsp-surveillance.c  209
-rw-r--r--  hw/fsp/fsp-sysparam.c  454
-rw-r--r--  hw/fsp/fsp.c  2147
-rw-r--r--  hw/gx.c  158
-rw-r--r--  hw/homer.c  143
-rw-r--r--  hw/lpc-uart.c  343
-rw-r--r--  hw/lpc.c  500
-rw-r--r--  hw/nx.c  127
-rw-r--r--  hw/occ.c  477
-rw-r--r--  hw/p5ioc2-phb.c  1233
-rw-r--r--  hw/p5ioc2.c  297
-rw-r--r--  hw/p7ioc-inits.c  1096
-rw-r--r--  hw/p7ioc-phb.c  3206
-rw-r--r--  hw/p7ioc.c  677
-rw-r--r--  hw/phb3.c  3880
-rw-r--r--  hw/psi.c  873
-rw-r--r--  hw/sfc-ctrl.c  523
-rw-r--r--  hw/slw.c  875
-rw-r--r--  hw/xscom.c  518
-rw-r--r--  include/affinity.h  34
-rw-r--r--  include/asm-utils.h  45
-rw-r--r--  include/ast.h  78
-rw-r--r--  include/bitutils.h  50
-rw-r--r--  include/capp.h  62
-rw-r--r--  include/cec.h  51
-rw-r--r--  include/centaur.h  24
-rw-r--r--  include/chip.h  151
-rw-r--r--  include/chiptod.h  28
-rw-r--r--  include/codeupdate.h  236
-rw-r--r--  include/compiler.h  50
-rw-r--r--  include/config.h  93
-rw-r--r--  include/console.h  68
-rw-r--r--  include/cpu.h  207
-rw-r--r--  include/device.h  233
-rw-r--r--  include/device_tree.h  35
-rw-r--r--  include/ec/config.h  82
-rw-r--r--  include/ec/gpio.h  53
-rw-r--r--  include/elf.h  135
-rw-r--r--  include/fsi-master.h  36
-rw-r--r--  include/fsp-elog.h  325
-rw-r--r--  include/fsp-leds.h  135
-rw-r--r--  include/fsp-mdst-table.h  37
-rw-r--r--  include/fsp-sysparam.h  57
-rw-r--r--  include/fsp.h  755
-rw-r--r--  include/gx.h  59
-rw-r--r--  include/hostservices.h  36
-rw-r--r--  include/interrupts.h  254
-rw-r--r--  include/io.h  175
-rw-r--r--  include/lock.h  83
-rw-r--r--  include/lpc.h  95
-rw-r--r--  include/mem-map.h  114
-rw-r--r--  include/mem_region-malloc.h  41
-rw-r--r--  include/mem_region.h  69
-rw-r--r--  include/memory.h  23
-rw-r--r--  include/nx.h  22
-rw-r--r--  include/op-panel.h  67
-rw-r--r--  include/opal-internal.h  234
-rw-r--r--  include/opal-msg.h  36
-rw-r--r--  include/opal.h  912
-rw-r--r--  include/p5ioc2-regs.h  251
-rw-r--r--  include/p5ioc2.h  184
-rw-r--r--  include/p7ioc-regs.h  472
-rw-r--r--  include/p7ioc.h  431
-rw-r--r--  include/pci-cfg.h  524
-rw-r--r--  include/pci.h  504
-rw-r--r--  include/phb3-regs.h  436
-rw-r--r--  include/phb3.h  355
-rw-r--r--  include/platform.h  122
-rw-r--r--  include/processor.h  318
-rw-r--r--  include/psi.h  239
-rw-r--r--  include/sfc-ctrl.h  24
-rw-r--r--  include/skiboot.h  202
-rw-r--r--  include/spcn.h  93
-rw-r--r--  include/stack.h  92
-rw-r--r--  include/timebase.h  91
-rw-r--r--  include/trace.h  46
-rw-r--r--  include/trace_types.h  132
-rw-r--r--  include/types.h  27
-rw-r--r--  include/vpd.h  45
-rw-r--r--  include/xscom.h  171
-rw-r--r--  libc/Makefile.inc  12
-rw-r--r--  libc/README.txt  62
-rw-r--r--  libc/ctype/Makefile.inc  19
-rw-r--r--  libc/ctype/isdigit.c  26
-rw-r--r--  libc/ctype/isprint.c  19
-rw-r--r--  libc/ctype/isspace.c  30
-rw-r--r--  libc/ctype/isxdigit.c  22
-rw-r--r--  libc/ctype/tolower.c  19
-rw-r--r--  libc/ctype/toupper.c  21
-rw-r--r--  libc/include/assert.h  29
-rw-r--r--  libc/include/ctype.h  24
-rw-r--r--  libc/include/errno.h  34
-rw-r--r--  libc/include/getopt.h  37
-rw-r--r--  libc/include/limits.h  33
-rw-r--r--  libc/include/stdint.h  30
-rw-r--r--  libc/include/stdio.h  66
-rw-r--r--  libc/include/stdlib.h  30
-rw-r--r--  libc/include/string.h  38
-rw-r--r--  libc/include/time.h  42
-rw-r--r--  libc/include/unistd.h  26
-rw-r--r--  libc/stdio/Makefile.inc  21
-rw-r--r--  libc/stdio/fileno.c  19
-rw-r--r--  libc/stdio/fprintf.c  26
-rw-r--r--  libc/stdio/fputc.c  24
-rw-r--r--  libc/stdio/fputs.c  23
-rw-r--r--  libc/stdio/fscanf.c  26
-rw-r--r--  libc/stdio/printf.c  27
-rw-r--r--  libc/stdio/putchar.c  21
-rw-r--r--  libc/stdio/puts.c  27
-rw-r--r--  libc/stdio/scanf.c  26
-rw-r--r--  libc/stdio/setvbuf.c  28
-rw-r--r--  libc/stdio/snprintf.c  30
-rw-r--r--  libc/stdio/sprintf.c  30
-rw-r--r--  libc/stdio/stdchnls.c  23
-rw-r--r--  libc/stdio/vfprintf.c  27
-rw-r--r--  libc/stdio/vfscanf.c  269
-rw-r--r--  libc/stdio/vsnprintf.c  246
-rw-r--r--  libc/stdio/vsprintf.c  19
-rw-r--r--  libc/stdio/vsscanf.c  131
-rw-r--r--  libc/stdlib/Makefile.inc  20
-rw-r--r--  libc/stdlib/abort.c  23
-rw-r--r--  libc/stdlib/atoi.c  18
-rw-r--r--  libc/stdlib/atol.c  18
-rw-r--r--  libc/stdlib/error.c  15
-rw-r--r--  libc/stdlib/rand.c  24
-rw-r--r--  libc/stdlib/strtol.c  115
-rw-r--r--  libc/stdlib/strtoul.c  105
-rw-r--r--  libc/string/Makefile.inc  20
-rw-r--r--  libc/string/memchr.c  29
-rw-r--r--  libc/string/memcmp.c  30
-rw-r--r--  libc/string/memcpy.c  27
-rw-r--r--  libc/string/memmove.c  42
-rw-r--r--  libc/string/memset.c  25
-rw-r--r--  libc/string/strcasecmp.c  28
-rw-r--r--  libc/string/strcat.c  24
-rw-r--r--  libc/string/strchr.c  28
-rw-r--r--  libc/string/strcmp.c  28
-rw-r--r--  libc/string/strcpy.c  25
-rw-r--r--  libc/string/strdup.c  25
-rw-r--r--  libc/string/strlen.c  27
-rw-r--r--  libc/string/strncasecmp.c  32
-rw-r--r--  libc/string/strncmp.c  31
-rw-r--r--  libc/string/strncpy.c  33
-rw-r--r--  libc/string/strstr.c  37
-rw-r--r--  libc/string/strtok.c  45
-rw-r--r--  libfdt/Makefile.inc  18
-rw-r--r--  libfdt/Makefile.libfdt  9
-rw-r--r--  libfdt/TODO  3
-rw-r--r--  libfdt/fdt.c  213
-rw-r--r--  libfdt/fdt.h  60
-rw-r--r--  libfdt/fdt_ro.c  528
-rw-r--r--  libfdt/fdt_rw.c  465
-rw-r--r--  libfdt/fdt_strerror.c  96
-rw-r--r--  libfdt/fdt_sw.c  280
-rw-r--r--  libfdt/fdt_wip.c  118
-rw-r--r--  libfdt/libfdt.h  1168
-rw-r--r--  libfdt/libfdt_env.h  24
-rw-r--r--  libfdt/libfdt_internal.h  94
-rw-r--r--  libfdt/version.lds  54
-rw-r--r--  libflash/Makefile.inc  7
-rw-r--r--  libflash/ffs.h  159
-rw-r--r--  libflash/libffs.c  280
-rw-r--r--  libflash/libffs.h  56
-rw-r--r--  libflash/libflash-priv.h  213
-rw-r--r--  libflash/libflash.c  636
-rw-r--r--  libflash/libflash.h  83
-rw-r--r--  libflash/test/Makefile.check  20
-rw-r--r--  libflash/test/stubs.c  16
-rw-r--r--  libflash/test/test-flash.c  418
-rw-r--r--  libpore/Makefile.inc  13
-rw-r--r--  libpore/fapi_sbe_common.H  69
-rw-r--r--  libpore/p8_delta_scan_rw.h  466
-rw-r--r--  libpore/p8_image_help_base.H  125
-rw-r--r--  libpore/p8_pore_api_custom.h  141
-rw-r--r--  libpore/p8_pore_table_gen_api.H  438
-rw-r--r--  libpore/p8_pore_table_gen_api_fixed.C  844
-rw-r--r--  libpore/p8_pore_table_static_data.c  60
-rw-r--r--  libpore/pgas.h  1169
-rw-r--r--  libpore/pore_inline.h  881
-rw-r--r--  libpore/pore_inline_assembler.c  1507
-rw-r--r--  libpore/sbe_xip_image.c  2562
-rw-r--r--  libpore/sbe_xip_image.h  1784
-rwxr-xr-x  make_offsets.sh  10
-rw-r--r--  platforms/Makefile.inc  10
-rw-r--r--  platforms/bmc/Makefile.inc  6
-rw-r--r--  platforms/bmc/bmc.h  24
-rw-r--r--  platforms/bmc/palmetto.c  186
-rw-r--r--  platforms/bmc/pnor.c  87
-rw-r--r--  platforms/ibm-fsp/Makefile.inc  6
-rw-r--r--  platforms/ibm-fsp/apollo.c  62
-rw-r--r--  platforms/ibm-fsp/common.c  196
-rw-r--r--  platforms/ibm-fsp/firenze.c  247
-rw-r--r--  platforms/ibm-fsp/ibm-fsp.h  26
-rw-r--r--  platforms/ibm-fsp/lxvpd.c  298
-rw-r--r--  platforms/ibm-fsp/lxvpd.h  111
-rw-r--r--  platforms/rhesus/Makefile.inc  6
-rw-r--r--  platforms/rhesus/rhesus.c  313
-rw-r--r--  skiboot.lds.S  143
374 files changed, 81208 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e243b77
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,27 @@
+*~
+*.o
+*.d
+*.rej
+*.swp
+skiboot-nosection.elf
+skiboot.elf
+skiboot.lds
+skiboot.lid
+skiboot.map
+TAGS
+tags
+cscope.out
+asm/asm-offsets.s
+include/asm-offsets.h
+gitid.c
+core/test/run-device
+core/test/run-malloc
+core/test/run-malloc-speed
+core/test/run-mem_region
+core/test/run-mem_region_init
+core/test/run-mem_region_release_unused
+core/test/run-mem_region_release_unused_noalloc
+core/test/run-msg
+core/test/run-trace
+external/dump_trace
+hdata/test/hdata_to_dt
diff --git a/LICENCE b/LICENCE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/LICENCE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..b610222
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,39 @@
+# If you want to build in another directory, copy this file there and
+# fill in the following values
+
+#
+# Prefix of cross toolchain, if anything
+# Example: CROSS= powerpc64-unknown-linux-gnu-
+#
+CROSS ?= powerpc64-linux-
+
+#
+# Set to enable SLW bits
+#
+PORE ?= 1
+
+#
+# Optional location of embedded linux kernel file
+# This can be a raw vmlinux, stripped vmlinux or
+# zImage.epapr
+#
+KERNEL ?=
+
+#
+# Where is the source directory, must be a full path (no ~)
+# Example: SRC= /home/me/skiboot
+#
+SRC=$(CURDIR)
+
+#
+# Where to get information about this machine (subdir name)
+#
+DEVSRC=hdata
+
+#
+# default config file, see include config_*.h for more specifics
+#
+CONFIG := config.h
+
+include $(SRC)/Makefile.main
+
diff --git a/Makefile.main b/Makefile.main
new file mode 100644
index 0000000..884c0c2
--- /dev/null
+++ b/Makefile.main
@@ -0,0 +1,154 @@
+# -*-Makefile-*-
+#
+# This is the main Makefile
+
+# Target tools
+CC=$(CROSS)gcc$(POSTFIX)
+LD=$(CROSS)ld$(POSTFIX)
+AS=$(CROSS)as
+AR=$(CROSS)ar
+NM=$(CROSS)nm
+OBJCOPY=$(CROSS)objcopy
+OBJDUMP=$(CROSS)objdump
+SIZE=$(CROSS)size
+LD_TEXT=0x0
+NM += --synthetic
+
+# Base warnings
+CWARNS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ -Werror-implicit-function-declaration -Wdeclaration-after-statement \
+ -Wno-pointer-sign -Wextra -Wno-sign-compare \
+ -Wmissing-prototypes -Wmissing-declarations \
+ -Wwrite-strings -Wcast-align -Wjump-misses-init \
+ -Winit-self \
+ -Wsuggest-attribute=const \
+ -Wsuggest-attribute=noreturn \
+ -Wframe-larger-than=2048 \
+ -Wstack-usage=4096
+
+# Host tools and options
+HOSTCC=gcc
+HOSTEND=$(shell uname -m | sed -e 's/^i.*86$$/LITTLE/' -e 's/^x86.*/LITTLE/' -e 's/^ppc.*/BIG/')
+HOSTCFLAGS=-O1 $(CWARNS) -DHAVE_$(HOSTEND)_ENDIAN
+VALGRIND=valgrind -q --show-reachable=yes --error-exitcode=99
+
+# Target options
+
+OPTS=-Os -ffunction-sections
+DBG=-g
+
+CPPFLAGS := -I$(SRC)/include -Iinclude -MMD -include $(SRC)/include/config.h
+CPPFLAGS += -I$(SRC)/libfdt -I$(SRC)/libflash -I$(SRC)/libc/include -I$(SRC)
+ifeq ($(PORE),1)
+CPPFLAGS += -I$(SRC)/libpore -D__HAVE_LIBPORE__
+endif
+CPPFLAGS += -D__SKIBOOT__ -nostdinc
+CPPFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+CPPFLAGS += -DBITS_PER_LONG=64 -m64 -DHAVE_BIG_ENDIAN
+# Ideally we want -ffreestanding and remove our copy of stdint.h
+# but that means uint64_t becomes an ulong instead of an ullong
+# causing all our printf's to warn ... so leave that alone for now
+#CPPFLAGS += -ffreestanding
+
+CFLAGS := -fno-strict-aliasing -fno-stack-protector -pie
+CFLAGS += $(CWARNS) $(OPTS) $(DBG)
+
+LDFLAGS := -m64 -static -nostdlib -Wl,--gc-sections -pie
+LDFLAGS += -Wl,-Ttext-segment,$(LD_TEXT) -Wl,-N -Wl,--build-id=none
+LDFLAGS += -Wl,--no-multi-toc
+LDRFLAGS=-melf64ppc
+# Debug stuff
+#LDFLAGS += -Wl,-v -Wl,-Map,foomap
+
+AFLAGS := -D__ASSEMBLY__
+
+# Special tool flags:
+# Do not use the floating point unit
+CFLAGS += -msoft-float
+# Do not use string instructions
+CFLAGS += -mno-string
+# Do not use load/store multiple word instructions
+CFLAGS += -mno-multiple
+# Do not use load/store update. You REALLY do not want to use this!
+# The async safety of the ABI stack depends on the atomicity
+# of update on store.
+#CFLAGS += -mno-update
+
+ifneq ($(KERNEL),)
+CPPFLAGS += -DBUILTIN_KERNEL="$(KERNEL)"
+endif
+
+.SECONDARY:
+
+vpath %.c $(SRC)
+vpath %.S $(SRC)
+
+default: all
+
+include/asm-offsets.h: asm/asm-offsets.s
+ @mkdir -p include
+ $(call Q,GN, $(SRC)/make_offsets.sh $< >$@, $@)
+
+TARGET = skiboot
+
+include $(SRC)/asm/Makefile.inc
+include $(SRC)/core/Makefile.inc
+include $(SRC)/hw/Makefile.inc
+include $(SRC)/platforms/Makefile.inc
+include $(SRC)/libfdt/Makefile.inc
+include $(SRC)/libflash/Makefile.inc
+include $(SRC)/libpore/Makefile.inc
+include $(SRC)/libc/Makefile.inc
+include $(SRC)/ccan/Makefile.inc
+include $(SRC)/$(DEVSRC)/Makefile.inc
+
+all: $(SUBDIRS) $(TARGET).lid $(TARGET).map
+
+OBJS := $(ASM) $(CORE) $(HW) $(PLATFORMS) $(LIBFDT) $(LIBFLASH)
+ifeq ($(PORE),1)
+OBJS += $(LIBPORE)
+endif
+OBJS += $(LIBC) $(CCAN) $(DEVSRC_OBJ) gitid.o
+
+$(TARGET).lid: $(TARGET).elf
+ $(OBJCOPY) -O binary -S $^ $@
+
+$(TARGET).elf: $(OBJS) $(TARGET).lds $(KERNEL)
+ $(call Q,LD, $(CC) $(LDFLAGS) -T $(TARGET).lds $(OBJS) -o $@, $@)
+
+$(TARGET).map: $(TARGET).elf
+ $(call Q,NM, $(NM) -n $< | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$$[adt]\)' > $@, $@)
+
+$(SUBDIRS):
+ $(call Q,MKDIR,mkdir $@, $@)
+
+-include $(wildcard *.d)
+-include $(wildcard $(SUBDIRS:%=%/*.d))
+
+# Set V=1 if you want to see everything.
+include $(SRC)/Makefile.rules
+
+gitid.c:
+ @echo "const char gitid[] = \"`cd $(SRC); git describe --always --dirty`\";" > $@
+
+include $(shell find $(SRC)/* -name Makefile.check)
+
+tags:
+ find . -name '*.[chS]' | xargs ctags
+
+TAGS:
+ find . -name '*.[chS]' | xargs etags
+
+.PHONY: tags TAGS check gitid.c
+
+cscope:
+ find . -name '*.[chS]' | xargs cscope
+
+clean:
+ $(RM) *.[odsa] $(SUBDIRS:%=%/*.[odsa])
+ $(RM) $(TARGET).elf $(TARGET).lid $(TARGET).map $(TARGET).lds
+ $(RM) include/asm-offsets.h gitid.c
+
+distclean: clean
+ $(RM) *~ $(SUBDIRS:%=%/*~) include/*~
+
diff --git a/Makefile.rules b/Makefile.rules
new file mode 100644
index 0000000..bfaa1a1
--- /dev/null
+++ b/Makefile.rules
@@ -0,0 +1,53 @@
+#
+# These allow for the build to be less verbose
+#
+
+ifdef V
+ VERBOSE:= $(V)
+else
+ VERBOSE:= 0
+endif
+
+ifeq ($(VERBOSE),1)
+define Q
+ $(2)
+endef
+else
+define Q
+ @echo " [$1] $(3)"
+ @$(2)
+endef
+endif
+
+define cook_aflags
+ $(filter-out $(AFLAGS_SKIP_$(1)), $(CPPFLAGS) $(AFLAGS)) $(AFLAGS_$(1))
+endef
+
+define cook_cflags
+ $(filter-out $(CFLAGS_SKIP_$(1)), $(CPPFLAGS) $(CFLAGS)) $(CFLAGS_$(1))
+endef
+
+%.o : %.S include/asm-offsets.h
+ $(call Q,AS, $(CC) $(call cook_aflags,$@) -c $< -o $@, $@)
+
+%.s : %.S include/asm-offsets.h
+ $(call Q,CC, $(CC) $(call cook_aflags,$@) -E -c $< -o $@, $@)
+
+%.o : %.c
+ $(call Q,CC, $(CC) $(call cook_cflags,$@) -c $< -o $@, $@)
+
+# Force the use of the C compiler, not C++, for the .C files in libpore
+%.o : %.C
+ $(call Q,CC, $(CC) $(call cook_cflags,$@) -x c -c $< -o $@, $@)
+
+%.s : %.c
+ $(call Q,CC, $(CC) $(call cook_cflags,$@) -S -c $< -o $@, $@)
+
+%.i : %.c
+ $(call Q,CC, $(CC) $(call cook_cflags,$@) -E -c $< -o $@, $@)
+
+%built-in.o : %
+ $(call Q,LD, $(LD) $(LDRFLAGS) -r $^ -o $@, $@)
+
+%.lds : %.lds.S
+ $(call Q,CC, $(CC) $(CPPFLAGS) -P -E $< -o $@, $@)
diff --git a/asm/Makefile.inc b/asm/Makefile.inc
new file mode 100644
index 0000000..4c858e2
--- /dev/null
+++ b/asm/Makefile.inc
@@ -0,0 +1,11 @@
+# -*-Makefile-*-
+
+SUBDIRS += asm
+ASM_OBJS = head.o lock.o misc.o kernel-wrapper.o
+ASM=asm/built-in.o
+
+# Add extra dependency to the kernel wrapper
+asm/kernel-wrapper.o : $(KERNEL)
+
+$(ASM): $(ASM_OBJS:%=asm/%)
+
diff --git a/asm/asm-offsets.c b/asm/asm-offsets.c
new file mode 100644
index 0000000..3440054
--- /dev/null
+++ b/asm/asm-offsets.c
@@ -0,0 +1,85 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stddef.h>
+#include <types.h>
+#include <skiboot.h>
+#include "../hdata/spira.h"
+#include <processor.h>
+#include <cpu.h>
+#include <stack.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n#define " #sym " %0 /* " #val " */" : : "i" (val))
+
+#define OFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem))
+
+int main(void)
+{
+ OFFSET(SPIRA_ACTUAL_SIZE, spira, reserved);
+
+ OFFSET(CPUTHREAD_PIR, cpu_thread, pir);
+ OFFSET(CPUTHREAD_SAVE_R1, cpu_thread, save_r1);
+ OFFSET(CPUTHREAD_STATE, cpu_thread, state);
+
+ OFFSET(STACK_TYPE, stack_frame, type);
+ OFFSET(STACK_LOCALS, stack_frame, locals);
+ OFFSET(STACK_GPR0, stack_frame, gpr[0]);
+ OFFSET(STACK_GPR1, stack_frame, gpr[1]);
+ OFFSET(STACK_GPR2, stack_frame, gpr[2]);
+ OFFSET(STACK_GPR3, stack_frame, gpr[3]);
+ OFFSET(STACK_GPR4, stack_frame, gpr[4]);
+ OFFSET(STACK_GPR5, stack_frame, gpr[5]);
+ OFFSET(STACK_GPR6, stack_frame, gpr[6]);
+ OFFSET(STACK_GPR7, stack_frame, gpr[7]);
+ OFFSET(STACK_GPR8, stack_frame, gpr[8]);
+ OFFSET(STACK_GPR9, stack_frame, gpr[9]);
+ OFFSET(STACK_GPR10, stack_frame, gpr[10]);
+ OFFSET(STACK_GPR11, stack_frame, gpr[11]);
+ OFFSET(STACK_GPR12, stack_frame, gpr[12]);
+ OFFSET(STACK_GPR13, stack_frame, gpr[13]);
+ OFFSET(STACK_GPR14, stack_frame, gpr[14]);
+ OFFSET(STACK_GPR15, stack_frame, gpr[15]);
+ OFFSET(STACK_GPR16, stack_frame, gpr[16]);
+ OFFSET(STACK_GPR17, stack_frame, gpr[17]);
+ OFFSET(STACK_GPR18, stack_frame, gpr[18]);
+ OFFSET(STACK_GPR19, stack_frame, gpr[19]);
+ OFFSET(STACK_GPR20, stack_frame, gpr[20]);
+ OFFSET(STACK_GPR21, stack_frame, gpr[21]);
+ OFFSET(STACK_GPR22, stack_frame, gpr[22]);
+ OFFSET(STACK_GPR23, stack_frame, gpr[23]);
+ OFFSET(STACK_GPR24, stack_frame, gpr[24]);
+ OFFSET(STACK_GPR25, stack_frame, gpr[25]);
+ OFFSET(STACK_GPR26, stack_frame, gpr[26]);
+ OFFSET(STACK_GPR27, stack_frame, gpr[27]);
+ OFFSET(STACK_GPR28, stack_frame, gpr[28]);
+ OFFSET(STACK_GPR29, stack_frame, gpr[29]);
+ OFFSET(STACK_GPR30, stack_frame, gpr[30]);
+ OFFSET(STACK_GPR31, stack_frame, gpr[31]);
+
+ OFFSET(STACK_CR, stack_frame, cr);
+ OFFSET(STACK_XER, stack_frame, xer);
+ OFFSET(STACK_CTR, stack_frame, ctr);
+ OFFSET(STACK_LR, stack_frame, lr);
+ OFFSET(STACK_PC, stack_frame, pc);
+ OFFSET(STACK_CFAR, stack_frame, cfar);
+ OFFSET(STACK_SRR0, stack_frame, srr0);
+ OFFSET(STACK_SRR1, stack_frame, srr1);
+ DEFINE(STACK_FRAMESIZE, sizeof(struct stack_frame));
+
+ return 0;
+}
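A note on the technique: asm-offsets.c is never linked into skiboot; it is only compiled to assembly (see the include/asm-offsets.h rule in Makefile.main), where each asm statement deposits a literal "#define" line that make_offsets.sh extracts into a header the .S files can include. A minimal, self-contained sketch of the same trick using a hypothetical struct (offsets shown assume an LP64 target, and the exact spelling of the emitted immediate is assembler-dependent):

    #include <stddef.h>

    struct demo {
            long a;
            char pad[12];
            long b;
    };

    #define DEFINE(sym, val) \
            asm volatile("\n#define " #sym " %0 /* " #val " */" : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    int main(void)
    {
            /* Compile with "cc -S demo.c"; demo.s then contains lines
             * such as "#define DEMO_B 24" that a grep/sed pass can
             * lift into a header file. */
            OFFSET(DEMO_A, demo, a);
            OFFSET(DEMO_B, demo, b);
            return 0;
    }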
diff --git a/asm/head.S b/asm/head.S
new file mode 100644
index 0000000..37a059d
--- /dev/null
+++ b/asm/head.S
@@ -0,0 +1,996 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <asm-utils.h>
+#include <asm-offsets.h>
+#include <mem-map.h>
+#include <processor.h>
+#include <opal.h>
+#include <stack.h>
+
+#define EPAPR_MAGIC 0x65504150
+
+/* Power management instructions */
+#define PPC_INST_NAP .long 0x4c000364
+#define PPC_INST_SLEEP .long 0x4c0003a4
+#define PPC_INST_RVWINKLE .long 0x4c0003e4
+
+#define EXCEPTION(nr) \
+ . = nr; \
+ mtsprg0 %r3; \
+ mfspr %r3,SPR_CFAR; \
+ b . ;
+
+
+/**
+ * patch_exception() makes assumptions about this macro, in order to extract
+ * the correct stack during MC. If you update this, also check the offset and
+ * the patch code in that function.
+ */
+#define GET_STACK(stack_reg,pir_reg) \
+ sldi stack_reg,pir_reg,STACK_SHIFT; \
+ addis stack_reg,stack_reg,CPU_STACKS_OFFSET@ha; \
+ addi stack_reg,stack_reg,CPU_STACKS_OFFSET@l;
+
+#define GET_CPU() \
+ clrrdi %r13,%r1,STACK_SHIFT
+
+#define SAVE_GPR(reg,sp) std %r##reg,STACK_GPR##reg(sp)
+#define REST_GPR(reg,sp) ld %r##reg,STACK_GPR##reg(sp)
+
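GET_STACK and GET_CPU encode a fixed per-CPU layout: every PIR owns one 2^STACK_SHIFT-byte region starting at CPU_STACKS_OFFSET, and the struct cpu_thread sits at the bottom of that region, so rounding any stack pointer down recovers it. A C sketch of the same arithmetic, with illustrative values standing in for the real mem-map.h constants:

    #include <stdint.h>

    #define STACK_SHIFT             14              /* assumption: 16K per CPU */
    #define STACK_SIZE              (1UL << STACK_SHIFT)
    #define CPU_STACKS_OFFSET       0x600000UL      /* hypothetical base */

    /* GET_STACK: sldi + addis/addi == base + (pir << STACK_SHIFT) */
    static inline uintptr_t get_stack(uint32_t pir)
    {
            return CPU_STACKS_OFFSET + ((uintptr_t)pir << STACK_SHIFT);
    }

    /* GET_CPU: clrrdi %r13,%r1,STACK_SHIFT == round the stack pointer
     * down to the region base, where the struct cpu_thread lives */
    static inline uintptr_t get_cpu(uintptr_t r1)
    {
            return r1 & ~(uintptr_t)(STACK_SIZE - 1);
    }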
+ .section ".head","ax"
+
+ . = 0
+.global __head
+__head:
+ /* When booted as an OPAL LID, this is a pointer to the OPAL
+ * variant of the NACA
+ */
+ .llong opal_naca
+
+ /* This entry point is used when booting with a flat device-tree
+ * pointer in r3
+ */
+ . = 0x10
+.global fdt_entry
+fdt_entry:
+ mr %r27,%r3
+ li %r25,0
+ b boot_entry
+
+ /* This is a pointer to a descriptor used by debugging tools
+ * on the service processor to get to various trace buffers
+ */
+ . = 0x80
+ .llong debug_descriptor
+
+ /* This is our boot semaphore used for CPUs to sync. It has to be
+ * at an easy-to-locate address (without relocation) since we
+ * need to get at it very early, before we apply our relocs
+ */
+ . = 0xf0
+boot_sem:
+ .long 0
+
+ /* And this is a boot flag used to kick secondaries into the
+ * main code.
+ */
+boot_flag:
+ .long 0
+
+ /* This is used to trigger an assert() and in turn an ATTN
+ * in skiboot when a special sequence is written at this
+ * address. For testing purposes only.
+ */
+ . = 0xf8
+.global attn_trigger
+attn_trigger:
+ .long 0
+
+ /* This is the host-initiated reset trigger for testing */
+ . = 0xfc
+.global hir_trigger
+hir_trigger:
+ .long 0
+
+ . = 0x100
+ /* BML entry, load up r3 with device tree location */
+ li %r3, 0
+ oris %r3, %r3, 0xa
+ b fdt_entry /* hack for lab boot */
+
+ /* Entry point set by the FSP */
+ . = 0x180
+ li %r27,0
+ li %r25,0
+ b boot_entry
+
+ /* More exception stubs */
+ EXCEPTION(0x200)
+ EXCEPTION(0x300)
+ EXCEPTION(0x380)
+ EXCEPTION(0x400)
+ EXCEPTION(0x480)
+ EXCEPTION(0x500)
+ EXCEPTION(0x600)
+ EXCEPTION(0x700)
+ EXCEPTION(0x800)
+ EXCEPTION(0x900)
+ EXCEPTION(0x980)
+ EXCEPTION(0xa00)
+ EXCEPTION(0xb00)
+ EXCEPTION(0xc00)
+ EXCEPTION(0xd00)
+ EXCEPTION(0xe00)
+ EXCEPTION(0xe20)
+ EXCEPTION(0xe40)
+ EXCEPTION(0xe50)
+ EXCEPTION(0xe60)
+ EXCEPTION(0xf00)
+ EXCEPTION(0xf20)
+ EXCEPTION(0xf40)
+ EXCEPTION(0x1000)
+ EXCEPTION(0x1100)
+ EXCEPTION(0x1200)
+ EXCEPTION(0x1300)
+ EXCEPTION(0x1400)
+ EXCEPTION(0x1500)
+ EXCEPTION(0x1600)
+ EXCEPTION(0x1700)
+ EXCEPTION(0x1800)
+ EXCEPTION(0x1900)
+ EXCEPTION(0x1a00)
+ EXCEPTION(0x1b00)
+ EXCEPTION(0x1c00)
+ EXCEPTION(0x1d00)
+ EXCEPTION(0x1e00)
+ EXCEPTION(0x1f00)
+
+ . = 0x2000
+ /* This is the OPAL branch table. It's populated at boot time
+ * with function pointers to the various OPAL functions from
+ * the content of the .opal_table section, indexed by Token.
+ */
+.global opal_branch_table
+opal_branch_table:
+ .space 8 * (OPAL_LAST + 1)
+
+/* Stores the offset we were started from. Used later on if we want to
+ * read any unrelocated code/data such as the built-in kernel image
+ */
+.global boot_offset
+boot_offset:
+ .llong 0
+
+/*
+ *
+ * Boot time entry point from FSP
+ *
+ * All CPUs come here
+ *
+ * Boot code NV register usage:
+ *
+ * r31 : Boot PIR
+ * r30 : Current running offset
+ * r29 : Target address
+ * r28 : PVR
+ * r27 : DTB pointer (or NULL)
+ * r26 : PIR thread mask
+ * r25 : Selected master CPU (OPAL boot)
+ */
+.global boot_entry
+boot_entry:
+ /* Check PVR and set some CR bits */
+ mfspr %r28,SPR_PVR
+ li %r26,3 /* Default to SMT4 */
+ srdi %r3,%r28,16
+ cmpwi cr0,%r3,PVR_TYPE_P7
+ beq 1f
+ cmpwi cr0,%r3,PVR_TYPE_P7P
+ beq 1f
+ cmpwi cr0,%r3,PVR_TYPE_P8
+ beq 2f
+ cmpwi cr0,%r3,PVR_TYPE_P8E
+ beq 2f
+ attn /* Unsupported CPU type... what do we do ? */
+
+ /* P8 -> 8 threads */
+2: li %r26,7
+
+ /* Get our reloc offset into r30 */
+1: bcl 20,31,$+4
+1: mflr %r30
+ subi %r30,%r30,(1b - __head)
+
+ /* Store reloc offset in boot_offset */
+ LOAD_IMM32(%r3, boot_offset - __head)
+ add %r3,%r3,%r30
+ std %r30,0(%r3)
+
+ /* Get ourselves a TOC & relocate it to our target address */
+ LOAD_IMM32(%r2,__toc_start - __head)
+ LOAD_IMM64(%r29, SKIBOOT_BASE)
+ add %r2,%r2,%r29
+
+ /* Fixup our MSR (remove TA) */
+ LOAD_IMM64(%r3, (MSR_HV | MSR_SF))
+ mtmsrd %r3,0
+
+ /* Check our PIR, avoid threads */
+ mfspr %r31,SPR_PIR
+ and. %r0,%r31,%r26
+ bne secondary_wait
+
+ /* Initialize per-core SPRs */
+ bl init_shared_sprs
+
+ /* Pick a boot CPU, cpu index in r31 */
+ LOAD_IMM32(%r3, boot_sem - __head)
+ add %r3,%r3,%r30
+1: lwarx %r4,0,%r3
+ addi %r0,%r4,1
+ stwcx. %r0,0,%r3
+ bne 1b
+ isync
+ cmpwi cr0,%r4,0
+ bne secondary_wait
+
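The lwarx/stwcx. loop just above is an atomic fetch-and-increment on boot_sem: the thread that reads back 0 wins and carries on as the boot CPU; everyone else falls through to secondary_wait. A one-line C equivalent, sketched with a GCC/Clang builtin:

    /* Returns true for exactly one caller: the elected boot CPU. */
    static inline _Bool elect_boot_cpu(unsigned int *boot_sem)
    {
            return __atomic_fetch_add(boot_sem, 1, __ATOMIC_SEQ_CST) == 0;
    }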
+ /* Make sure we are in SMT medium */
+ smt_medium
+
+ /* Initialize thread SPRs */
+ bl init_replicated_sprs
+
+ /* Check if we need to copy ourselves up and update %r30 to
+ * be our new offset
+ */
+ cmpd %r29,%r30
+ beq 2f
+ LOAD_IMM32(%r3, _sbss - __head)
+ srdi %r3,%r3,3
+ mtctr %r3
+ mr %r4,%r30
+ mr %r15,%r30
+ mr %r30,%r29
+1: ld %r0,0(%r4)
+ std %r0,0(%r29)
+ addi %r29,%r29,8
+ addi %r4,%r4,8
+ bdnz 1b
+ sync
+ icbi 0,%r29
+ sync
+ isync
+ LOAD_IMM32(%r3, 2f - __head)
+ add %r3,%r3,%r30
+ mtctr %r3
+ bctr
+
+ /* Get ready for C code: get a stack */
+2: GET_STACK(%r1,%r31)
+
+ /* Clear up initial frame */
+ li %r3,0
+ std %r3,0(%r1)
+ std %r3,8(%r1)
+ std %r3,16(%r1)
+
+ /* Relocate ourselves */
+ bl call_relocate
+
+ /* Tell secondaries to move to the second-stage (relocated) spin loop */
+ LOAD_IMM32(%r3, boot_flag - __head)
+ add %r3,%r3,%r15
+ li %r0,1
+ stw %r0,0(%r3)
+
+ /* Clear BSS */
+ li %r0,0
+ LOAD_ADDR_FROM_TOC(%r3, _sbss)
+ LOAD_ADDR_FROM_TOC(%r4, _ebss)
+ subf %r4,%r3,%r4
+ srdi %r4,%r4,3
+ mtctr %r4
+1: std %r0,0(%r3)
+ addi %r3,%r3,8
+ bdnz 1b
+
+ /* Jump to C */
+ GET_CPU()
+ mr %r3,%r27
+ mr %r4,%r25
+ bl main_cpu_entry
+ b .
+
+ /* Secondary CPUs wait here; r31 contains the PIR */
+secondary_wait:
+ /* The primary might be in the middle of relocating us,
+ * so first we spin on the boot_flag
+ */
+ LOAD_IMM32(%r3, boot_flag - __head)
+ add %r3,%r3,%r30
+1: smt_very_low
+ lwz %r0,0(%r3)
+ cmpdi %r0,0
+ beq 1b
+
+ /* Init some registers */
+ bl init_replicated_sprs
+
+ /* Switch to new runtime address */
+ mr %r30,%r29
+ LOAD_IMM32(%r3, 1f - __head)
+ add %r3,%r3,%r30
+ mtctr %r3
+ isync
+ bctr
+1:
+ /* Now wait for cpu_secondary_start to be set */
+ LOAD_ADDR_FROM_TOC(%r3, cpu_secondary_start)
+1: smt_very_low
+ ld %r0,0(%r3)
+ cmpdi %r0,0
+ beq 1b
+
+ smt_medium
+
+ /* Check that our PIR is in bounds */
+ LOAD_ADDR_FROM_TOC(%r5, cpu_max_pir)
+ lwz %r5,0(%r5)
+ cmpw %r31,%r5
+ bgt- secondary_not_found
+
+ /* Get our stack, cpu thread, and jump to C */
+ GET_STACK(%r1,%r31)
+ li %r0,0
+ std %r0,0(%r1)
+ std %r0,16(%r1)
+ GET_CPU()
+
+ bl secondary_cpu_entry
+ b .
+
+ /* Not found... what to do ? set some global error ? */
+secondary_not_found:
+ smt_very_low
+ b .
+
+call_relocate:
+ mflr %r14
+ LOAD_IMM32(%r4,__dynamic_start - __head)
+ LOAD_IMM32(%r5,__rela_dyn_start - __head)
+ add %r4,%r4,%r30
+ add %r5,%r5,%r30
+ mr %r3,%r30
+ bl relocate
+ cmpwi %r3,0
+ beq 1f
+ mtlr %r14
+ blr
+1: /* Fatal relocate failure */
+ b .
+
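call_relocate hands relocate() (core/relocate.c) the run-time base together with the .dynamic and .rela.dyn offsets so the position-independent image can fix itself up in place. For orientation, a minimal sketch of the heart of such a pass, assuming only R_PPC64_RELATIVE entries (the real function must also walk .dynamic to locate the table and its size):

    #include <stdint.h>

    #define R_PPC64_RELATIVE 22     /* ELF reloc type: *target = base + addend */

    struct elf64_rela {
            uint64_t r_offset;
            uint64_t r_info;        /* relocation type in the low 32 bits */
            int64_t  r_addend;
    };

    static void apply_rela(uint64_t base, struct elf64_rela *rela,
                           uint64_t count)
    {
            for (uint64_t i = 0; i < count; i++) {
                    if ((uint32_t)rela[i].r_info != R_PPC64_RELATIVE)
                            continue;       /* anything else would be fatal */
                    *(uint64_t *)(base + rela[i].r_offset) =
                            base + rela[i].r_addend;
            }
    }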
+/* This is a little piece of code that is copied down to
+ * 0x100 when doing a "fast reset"
+ */
+.global fast_reset_patch_start
+fast_reset_patch_start:
+ smt_medium
+ LOAD_IMM64(%r30, SKIBOOT_BASE)
+ LOAD_IMM32(%r3, fast_reset_entry - __head)
+ add %r3,%r30,%r3
+ mtctr %r3
+ bctr
+.global fast_reset_patch_end
+fast_reset_patch_end:
+
+/* Fast reset code. We clean up the TLB and a few SPRs and
+ * return to C code. All CPUs do that; the CPU triggering the
+ * reset does it to itself last. The C code will sort out who
+ * the master is. We come from the trampoline above with
+ * r30 containing SKIBOOT_BASE
+ */
+fast_reset_entry:
+ /* Clear out SLB */
+ li %r6,0
+ slbmte %r6,%r6
+ slbia
+ ptesync
+
+ /* Get PIR */
+ mfspr %r31,SPR_PIR
+
+ /* Get a stack and restore r13 */
+ GET_STACK(%r1,%r31)
+ li %r3,0
+ std %r3,0(%r1)
+ std %r3,8(%r1)
+ std %r3,16(%r1)
+ GET_CPU()
+
+ /* Get our TOC */
+ addis %r2,%r30,(__toc_start - __head)@ha
+ addi %r2,%r2,(__toc_start - __head)@l
+
+ /* Go to C ! */
+ bl fast_reboot
+ b .
+
+.global cleanup_tlb
+cleanup_tlb:
+ /* Clean the TLB */
+ li %r3,128
+ mtctr %r3
+ li %r4,0x800 /* IS field = 0b10 */
+ ptesync
+1: tlbiel %r4
+ addi %r4,%r4,0x1000
+ bdnz 1b
+ ptesync
+ blr
+
+#define FIXUP_ENDIAN \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b $+36; /* Skip trampoline if endian is good */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x1c004a39; /* addi r10,r10,28 */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0xa6035a7d; /* mtsrr0 r10 */ \
+ .long 0xa6037b7d; /* mtsrr1 r11 */ \
+ .long 0x2400004c /* rfid */
+
+.global enter_rvwinkle
+enter_rvwinkle:
+ /* Before entering rvwinkle, we create a stack frame
+ * and save our non-volatile registers.
+ *
+ * We also save these SPRs:
+ *
+ * - HSPRG0 in GPR0 slot
+ * - HSPRG1 in GPR1 slot
+ *
+ * - xxx TODO: HIDs
+ * - TODO: Mask MSR:ME during the process
+ */
+ mflr %r0
+ std %r0,16(%r1)
+ stdu %r1,-STACK_FRAMESIZE(%r1)
+ SAVE_GPR(2,%r1)
+ SAVE_GPR(14,%r1)
+ SAVE_GPR(15,%r1)
+ SAVE_GPR(16,%r1)
+ SAVE_GPR(17,%r1)
+ SAVE_GPR(18,%r1)
+ SAVE_GPR(19,%r1)
+ SAVE_GPR(20,%r1)
+ SAVE_GPR(21,%r1)
+ SAVE_GPR(22,%r1)
+ SAVE_GPR(23,%r1)
+ SAVE_GPR(24,%r1)
+ SAVE_GPR(25,%r1)
+ SAVE_GPR(26,%r1)
+ SAVE_GPR(27,%r1)
+ SAVE_GPR(28,%r1)
+ SAVE_GPR(29,%r1)
+ SAVE_GPR(30,%r1)
+ SAVE_GPR(31,%r1)
+ mfcr %r3
+ mfxer %r4
+ mfspr %r5,SPR_HSPRG0
+ mfspr %r6,SPR_HSPRG1
+ stw %r3,STACK_CR(%r1)
+ stw %r4,STACK_XER(%r1)
+ std %r5,STACK_GPR0(%r1)
+ std %r6,STACK_GPR1(%r1)
+
+ /* Save stack pointer in struct cpu_thread */
+ std %r1,CPUTHREAD_SAVE_R1(%r13)
+
+ /* rvwinkle sequence */
+ ptesync
+0: ld %r0,CPUTHREAD_SAVE_R1(%r13)
+ cmpd cr0,%r0,%r0
+ bne 0b
+ PPC_INST_RVWINKLE
+ b .
+
+/* This is a little piece of code that is copied down to
+ * 0x100 when doing a "rvwinkle reinit"
+ */
+.global rvwinkle_patch_start
+rvwinkle_patch_start:
+ FIXUP_ENDIAN
+ smt_medium
+ LOAD_IMM64(%r30, SKIBOOT_BASE)
+ LOAD_IMM32(%r3, rvwinkle_restore - __head)
+ add %r3,%r30,%r3
+ mtctr %r3
+ bctr
+.global rvwinkle_patch_end
+rvwinkle_patch_end:
+
+rvwinkle_restore:
+ /* Get PIR */
+ mfspr %r31,SPR_PIR
+
+ /* Initialize per-core SPRs
+ *
+ * XXX We do it on each thread ... oh well, improve that later
+ */
+ bl init_shared_sprs
+
+ /* Initialize thread SPRs */
+ bl init_replicated_sprs
+
+ /* Get that CPU stack base and use it to restore r13 */
+ GET_STACK(%r1,%r31)
+ GET_CPU()
+
+ /* Restore original stack pointer */
+ ld %r1,CPUTHREAD_SAVE_R1(%r13)
+
+ /* Restore more stuff */
+ lwz %r3,STACK_CR(%r1)
+ lwz %r4,STACK_XER(%r1)
+ ld %r5,STACK_GPR0(%r1)
+ ld %r6,STACK_GPR1(%r1)
+ mtcr %r3
+ mtxer %r4
+ mtspr SPR_HSPRG0,%r5
+ mtspr SPR_HSPRG1,%r6
+ REST_GPR(2,%r1)
+ REST_GPR(14,%r1)
+ REST_GPR(15,%r1)
+ REST_GPR(16,%r1)
+ REST_GPR(17,%r1)
+ REST_GPR(18,%r1)
+ REST_GPR(19,%r1)
+ REST_GPR(20,%r1)
+ REST_GPR(21,%r1)
+ REST_GPR(22,%r1)
+ REST_GPR(23,%r1)
+ REST_GPR(24,%r1)
+ REST_GPR(25,%r1)
+ REST_GPR(26,%r1)
+ REST_GPR(27,%r1)
+ REST_GPR(28,%r1)
+ REST_GPR(29,%r1)
+ REST_GPR(30,%r1)
+ REST_GPR(31,%r1)
+
+ /* Get LR back, pop stack and return */
+ addi %r1,%r1,STACK_FRAMESIZE
+ ld %r0,16(%r1)
+ mtlr %r0
+ blr
+
+/* Functions to initialize replicated and shared SPRs to sane
+ * values. This is called at boot and on soft-reset
+ */
+.global init_shared_sprs
+init_shared_sprs:
+ li %r0,0
+ mtspr SPR_SDR1, %r0
+ mtspr SPR_AMOR, %r0
+
+ mfspr %r3,SPR_PVR
+ srdi %r3,%r3,16
+ cmpwi cr0,%r3,PVR_TYPE_P7
+ beq 1f
+ cmpwi cr0,%r3,PVR_TYPE_P7P
+ beq 2f
+ cmpwi cr0,%r3,PVR_TYPE_P8E
+ beq 3f
+ cmpwi cr0,%r3,PVR_TYPE_P8
+ beq 3f
+ /* Unsupported CPU type... what do we do ? */
+ b 9f
+
+1: /* P7 */
+ /* TSCR: Value from pHyp */
+ LOAD_IMM32(%r3,0x880DE880)
+ mtspr SPR_TSCR, %r3
+ b 9f
+
+2: /* P7+ */
+ /* TSCR: Recommended value by HW folks */
+ LOAD_IMM32(%r3,0x88CDE880)
+ mtspr SPR_TSCR, %r3
+ b 9f
+
+3: /* P8E/P8 */
+ /* TSCR: Recommended value by HW folks */
+ LOAD_IMM32(%r3,0x8ACC6880)
+ mtspr SPR_TSCR, %r3
+ mfspr %r3,SPR_LPCR
+ rldicr %r3,%r3,12,60
+ ori %r3,%r3,4
+ rldicr %r3,%r3,52,63
+ mtspr SPR_LPCR,%r3
+ sync
+ isync
+ /* HID0: Clear bit 13 (enable core recovery) */
+ mfspr %r3,SPR_HID0
+ li %r0,1
+ sldi %r0,%r0,(63-13)
+ andc %r3,%r3,%r0
+ sync
+ mtspr SPR_HID0,%r3
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ isync
+ /* HMEER: Enable HMIs for core recovery and TOD errors. */
+ LOAD_IMM64(%r0,SPR_HMEER_HMI_ENABLE_MASK)
+ mfspr %r3,SPR_HMEER
+ or %r3,%r3,%r0
+ sync
+ mtspr SPR_HMEER,%r3
+ isync
+9: blr
+
+.global init_replicated_sprs
+init_replicated_sprs:
+ /* LPCR: sane value */
+ LOAD_IMM64(%r3,0x0070000000000004)
+ mtspr SPR_LPCR, %r3
+
+ /* XXX TODO: Add more */
+ blr
+
+/*
+ *
+ * NACA structure, accessed by the FSP to find the SPIRA
+ *
+ */
+ . = 0x4000
+.global naca
+naca:
+ .llong 0 /* 0x0000 : Reserved */
+ .llong 0 /* 0x0008 : Reserved */
+ .llong 0 /* 0x0010 : Reserved */
+ .llong hv_release_data /* 0x0018 : HV release data */
+ .llong 0 /* 0x0020 : Reserved */
+ .llong 0 /* 0x0028 : Reserved */
+ .llong spira /* 0x0030 : SP Interface Root */
+ .llong hv_lid_load_table /* 0x0038 : LID load table */
+ .llong 0 /* 0x0040 : Reserved */
+ .space 68
+ .long 0 /* 0x008c : Reserved */
+ .space 16
+ .long SPIRA_ACTUAL_SIZE /* 0x00a0 : Actual size of SPIRA */
+ .space 28
+ .llong 0 /* 0x00c0 : resident module loadmap */
+ .space 136
+ .llong 0 /* 0x0150 : reserved */
+ .space 40
+ .llong 0 /* 0x0180 : reserved */
+ .space 36
+ .long 0 /* 0x01ac : control flags */
+ .byte 0 /* 0x01b0 : reserved */
+ .space 4
+ .byte 0 /* 0x01b5 : default state for SW attn */
+ .space 1
+ .byte 0x01 /* 0x01b7 : PCIA format */
+ .space 0xe48
+
+ .balign 0x10
+hv_release_data:
+ .space 58
+ .llong 0x666 /* VRM ? */
+
+ .balign 0x10
+hv_lid_load_table:
+ .long 0x10
+ .long 0x10
+ .long 0
+ .long 0
+
+/*
+ *
+ * OPAL variant of NACA
+ *
+ */
+.global opal_naca
+opal_naca:
+ .llong opal_boot_trampoline /* Primary entry (used ?) */
+ .llong opal_boot_trampoline /* Secondary entry (used ?) */
+ .llong spira /* Spira pointer */
+ .llong 0 /* Load address */
+ .llong opal_boot_trampoline /* 0x180 trampoline */
+ .llong 0 /* More stuff as seen in objdump ...*/
+ .llong 0
+ .llong 0
+ .llong 0
+
+ /* The FSP seems to ignore our primary/secondary entry
+ * points and instead copy that bit down to 0x180 and
+ * patch the first instruction to get our expected
+ * boot CPU number. We ignore that patching for now and
+ * go to the same entry we use for pHyp and FDT HB.
+ */
+opal_boot_trampoline:
+ li %r25,0
+ li %r27,-1
+ ba boot_entry - __head
+
+/*
+ *
+ * OPAL entry point from operating system
+ *
+ * Register usage:
+ *
+ * r0: Token
+ * r2: OPAL Base
+ * r3..r11: Args
+ * r12: Scratch
+ * r13..r31: Preserved
+ *
+ */
+ .balign 0x10
+.global opal_entry
+opal_entry:
+ /* Get our per CPU stack */
+ mfspr %r12,SPR_PIR
+ GET_STACK(%r12,%r12)
+ stdu %r12,-STACK_FRAMESIZE(%r12)
+
+ /* Save caller r1, establish new r1 */
+ std %r1,STACK_GPR1(%r12)
+ mr %r1,%r12
+
+ /* May save arguments for tracing */
+#ifdef OPAL_TRACE_ENTRY
+ std %r3,STACK_GPR3(%r1)
+ std %r4,STACK_GPR4(%r1)
+ std %r5,STACK_GPR5(%r1)
+ std %r6,STACK_GPR6(%r1)
+ std %r7,STACK_GPR7(%r1)
+ std %r8,STACK_GPR8(%r1)
+ std %r9,STACK_GPR9(%r1)
+ std %r10,STACK_GPR10(%r1)
+ std %r11,STACK_GPR11(%r1)
+#endif
+ /* Save Token (r0), LR and r13 */
+ mflr %r12
+ std %r0,STACK_GPR0(%r1)
+ std %r13,STACK_GPR13(%r1)
+ std %r12,STACK_LR(%r1)
+
+ /* Get the CPU thread */
+ GET_CPU()
+
+ /* Mark the stack frame */
+ li %r12,STACK_ENTRY_OPAL_API
+ std %r12,STACK_TYPE(%r1)
+
+ /* Get our TOC */
+ addis %r2,%r2,(__toc_start - __head)@ha
+ addi %r2,%r2,(__toc_start - __head)@l
+
+ /* Check for a reboot in progress */
+ LOAD_ADDR_FROM_TOC(%r12, reboot_in_progress)
+ lbz %r12,0(%r12)
+ cmpwi %r12,0
+ bne 3f
+
+#ifdef OPAL_TRACE_ENTRY
+ mr %r3,%r1
+ bl opal_trace_entry
+ ld %r0,STACK_GPR0(%r1)
+ ld %r3,STACK_GPR3(%r1)
+ ld %r4,STACK_GPR4(%r1)
+ ld %r5,STACK_GPR5(%r1)
+ ld %r6,STACK_GPR6(%r1)
+ ld %r7,STACK_GPR7(%r1)
+ ld %r8,STACK_GPR8(%r1)
+ ld %r9,STACK_GPR9(%r1)
+ ld %r10,STACK_GPR10(%r1)
+ ld %r11,STACK_GPR11(%r1)
+#endif /* OPAL_TRACE_ENTRY */
+
+ /* Convert our token into a table entry and get the
+ * function pointer. Also check the token.
+ */
+ cmpldi %r0,OPAL_LAST
+ bgt- 2f
+ sldi %r0,%r0,3
+ LOAD_ADDR_FROM_TOC(%r12, opal_branch_table)
+ ldx %r0,%r12,%r0
+ cmpldi %r0,0
+ beq- 2f
+ mtctr %r0
+
+ /* Jump ! */
+ bctrl
+
+1: ld %r12,STACK_LR(%r1)
+ mtlr %r12
+ ld %r13,STACK_GPR13(%r1)
+ ld %r1,STACK_GPR1(%r1)
+ blr
+
+2: /* Bad token */
+ ld %r3,STACK_GPR0(%r1)
+ bl opal_bad_token
+ b 1b
+
+3: /* Reboot in progress, reject all calls */
+ li %r3,OPAL_BUSY
+ b 1b
+
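In C terms, the core of opal_entry between the stack setup and the return path is the dispatch below: refuse calls while a reboot is in flight, validate the token, then jump through the table slot populated from the .opal_table section. A hedged sketch; register marshalling and tracing are elided, only two of the nine possible arguments (r3..r11) are shown, and the OPAL_BUSY value is an assumption taken from include/opal.h:

    #include <stdint.h>

    #define OPAL_BUSY       (-12)   /* assumption: value from include/opal.h */

    typedef int64_t (*opal_fn)(uint64_t a0, uint64_t a1);

    extern opal_fn opal_branch_table[];     /* the table at 0x2000 above */
    extern uint8_t reboot_in_progress;
    extern int64_t opal_bad_token(uint64_t token);
    extern const uint64_t opal_last;        /* stand-in for OPAL_LAST */

    static int64_t opal_dispatch(uint64_t token, uint64_t a0, uint64_t a1)
    {
            if (reboot_in_progress)
                    return OPAL_BUSY;
            if (token > opal_last || !opal_branch_table[token])
                    return opal_bad_token(token);
            return opal_branch_table[token](a0, a1);
    }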
+.global start_kernel
+start_kernel:
+ sync
+ icbi 0,%r3
+ sync
+ isync
+ mtctr %r3
+ mr %r3,%r4
+ LOAD_IMM64(%r8,SKIBOOT_BASE);
+ LOAD_IMM32(%r10, opal_entry - __head)
+ add %r9,%r8,%r10
+ LOAD_IMM32(%r6, EPAPR_MAGIC)
+ addi %r7,%r5,1
+ li %r4,0
+ li %r5,0
+ bctr
+
+ .global start_kernel32
+start_kernel32:
+ mfmsr %r10
+ clrldi %r10,%r10,1
+ mtmsrd %r10,0
+ sync
+ isync
+ b start_kernel
+
+.global start_kernel_secondary
+start_kernel_secondary:
+ sync
+ isync
+ mtctr %r3
+ mfspr %r3,SPR_PIR
+ bctr
+
+ .global exc_primary_start
+exc_primary_start:
+ mtspr SPR_HSPRG1,%r1
+ mfspr %r1,SPR_CFAR
+0: b .
+ .global exc_primary_end
+exc_primary_end:
+
+ .global exc_primary_patch_branch
+exc_primary_patch_branch:
+ .long 0b - exc_primary_start
+
+ .global exc_secondary_start
+exc_secondary_start:
+ mtspr SPR_CFAR,%r1
+ mfspr %r1,SPR_PIR
+0: GET_STACK(%r1,%r1)
+ stdu %r1,-STACK_FRAMESIZE(%r1)
+ std %r3,STACK_GPR3(%r1)
+ mfspr %r3,SPR_HSPRG1
+ std %r3,STACK_GPR1(%r1)
+ mfspr %r3,SPR_CFAR
+ std %r3,STACK_CFAR(%r1)
+1: mfspr %r3,SPR_SRR0
+ std %r3,STACK_SRR0(%r1)
+2: mfspr %r3,SPR_SRR1
+ std %r3,STACK_SRR1(%r1)
+ mflr %r3
+ std %r3,STACK_LR(%r1)
+ LOAD_IMM32(%r3,exception_entry_common - __head);
+ addis %r3,%r3,SKIBOOT_BASE@h
+ mtlr %r3
+3: li %r3,0
+ blrl /* XXX Use a BH=01 variant to avoid link stack problems */
+ ld %r3,STACK_LR(%r1)
+ mtlr %r3
+ ld %r3,STACK_SRR0(%r1)
+4: mtspr SPR_SRR0,%r3
+ ld %r3,STACK_SRR1(%r1)
+5: mtspr SPR_SRR1,%r3
+ ld %r3,STACK_GPR3(%r1)
+ ld %r1,STACK_GPR1(%r1)
+6: rfid
+ .global exc_secondary_end
+exc_secondary_end:
+
+ .global exc_secondary_patch_stack
+exc_secondary_patch_stack:
+ .long 0b - exc_secondary_start
+ .global exc_secondary_patch_mfsrr0
+exc_secondary_patch_mfsrr0:
+ .long 1b - exc_secondary_start
+ .global exc_secondary_patch_mfsrr1
+exc_secondary_patch_mfsrr1:
+ .long 2b - exc_secondary_start
+ .global exc_secondary_patch_type
+exc_secondary_patch_type:
+ .long 3b - exc_secondary_start
+ .global exc_secondary_patch_mtsrr0
+exc_secondary_patch_mtsrr0:
+ .long 4b - exc_secondary_start
+ .global exc_secondary_patch_mtsrr1
+exc_secondary_patch_mtsrr1:
+ .long 5b - exc_secondary_start
+ .global exc_secondary_patch_rfid
+exc_secondary_patch_rfid:
+ .long 6b - exc_secondary_start
+
+ /* The rest of the exception entry code */
+exception_entry_common:
+ std %r3,STACK_TYPE(%r1)
+
+ /* We save the exception return LR in the stack-locals area */
+ mflr %r3
+ std %r3,STACK_LOCALS(%r1)
+
+ /* Save more stuff */
+ std %r0,STACK_GPR0(%r1)
+ std %r2,STACK_GPR2(%r1)
+ std %r4,STACK_GPR4(%r1)
+ std %r5,STACK_GPR5(%r1)
+ std %r6,STACK_GPR6(%r1)
+ std %r7,STACK_GPR7(%r1)
+ std %r8,STACK_GPR8(%r1)
+ std %r9,STACK_GPR9(%r1)
+ std %r10,STACK_GPR10(%r1)
+ std %r11,STACK_GPR11(%r1)
+ std %r12,STACK_GPR12(%r1)
+ std %r13,STACK_GPR13(%r1)
+ mfcr %r3
+ stw %r3,STACK_CR(%r1)
+ mfctr %r3
+ std %r3,STACK_CTR(%r1)
+
+ GET_CPU()
+
+ LOAD_IMM64(%r2, SKIBOOT_BASE)
+ addis %r2,%r2,(__toc_start - __head)@ha
+ addi %r2,%r2,(__toc_start - __head)@l
+
+ mr %r3,%r1
+ bl exception_entry
+
+ ld %r3,STACK_CTR(%r1)
+ lwz %r4,STACK_CR(%r1)
+ mtctr %r3
+ mtcr %r4
+
+ ld %r0,STACK_GPR0(%r1)
+ ld %r2,STACK_GPR2(%r1)
+ ld %r4,STACK_GPR4(%r1)
+ ld %r5,STACK_GPR5(%r1)
+ ld %r6,STACK_GPR6(%r1)
+ ld %r7,STACK_GPR7(%r1)
+ ld %r8,STACK_GPR8(%r1)
+ ld %r9,STACK_GPR9(%r1)
+ ld %r10,STACK_GPR10(%r1)
+ ld %r11,STACK_GPR11(%r1)
+ ld %r12,STACK_GPR12(%r1)
+ ld %r13,STACK_GPR13(%r1)
+
+ ld %r3,STACK_LOCALS(%r1)
+ mtlr %r3
+ blr
diff --git a/asm/kernel-wrapper.S b/asm/kernel-wrapper.S
new file mode 100644
index 0000000..a9dc3d6
--- /dev/null
+++ b/asm/kernel-wrapper.S
@@ -0,0 +1,26 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define stringify(expr) stringify_1(expr)
+/* Double-indirection required to stringify expansions */
+#define stringify_1(expr) #expr
+
+ .section ".builtin_kernel","a"
+ .balign 0x10000
+#ifdef BUILTIN_KERNEL
+ .incbin stringify(BUILTIN_KERNEL)
+#endif
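The double indirection matters because # stringifies its argument before macro expansion: applied directly it would produce "BUILTIN_KERNEL" rather than the path passed in from Makefile.main via -DBUILTIN_KERNEL="$(KERNEL)" (the shell consumes those quotes, so the macro body is the bare path and stringify re-quotes it for .incbin). A quick C illustration with a hypothetical value:

    #define stringify_1(expr)       #expr
    #define stringify(expr)         stringify_1(expr)

    #define BUILTIN_KERNEL          vmlinux.bin     /* hypothetical path */

    const char *direct   = stringify_1(BUILTIN_KERNEL); /* "BUILTIN_KERNEL" */
    const char *expanded = stringify(BUILTIN_KERNEL);   /* "vmlinux.bin" */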
diff --git a/asm/lock.S b/asm/lock.S
new file mode 100644
index 0000000..ce28010
--- /dev/null
+++ b/asm/lock.S
@@ -0,0 +1,43 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <asm-utils.h>
+#include <asm-offsets.h>
+#include <processor.h>
+
+ .section ".text","ax"
+ .balign 0x10
+
+ /* bool try_lock(struct lock *lock) */
+.global __try_lock
+__try_lock:
+ ld %r0,0(%r3)
+ andi. %r10,%r0,1
+ bne 2f
+ lwz %r9,CPUTHREAD_PIR(%r13)
+1: ldarx %r0,0,%r3
+ andi. %r10,%r0,1
+ bne- 2f
+ ori %r0,%r0,1
+ rldimi %r0,%r9,32,0
+ stdcx. %r0,0,%r3
+ bne 1b
+ sync
+ li %r3,-1
+ blr
+2: li %r3,0
+ blr
+
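__try_lock treats the lock as a 64-bit word: the plain ld/andi. pair cheaply bails out if bit 0 (lock held) is already set before taking a reservation, then the ldarx/stdcx. loop atomically sets that bit while rldimi deposits the caller's PIR in the upper 32 bits so the owner can be identified later. The trailing sync gives acquire ordering, and the routine returns non-zero (-1) on success, 0 on failure. A minimal sketch of a spin loop a caller might build on top of it; the struct layout and the lock() name here are assumptions for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	struct lock {
		uint64_t lock_val;	/* bit 0: held; upper 32 bits: owner PIR */
	};

	bool __try_lock(struct lock *l);	/* the asm routine above */

	static inline void lock(struct lock *l)
	{
		while (!__try_lock(l))
			;	/* spin until the holder clears bit 0 */
	}
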
diff --git a/asm/misc.S b/asm/misc.S
new file mode 100644
index 0000000..ccb30d1
--- /dev/null
+++ b/asm/misc.S
@@ -0,0 +1,43 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <asm-utils.h>
+#include <asm-offsets.h>
+#include <processor.h>
+
+ .section ".text","ax"
+ .balign 0x10
+
+ /* void set_hid0(unsigned long hid0) */
+.global set_hid0
+set_hid0:
+ sync
+ mtspr SPR_HID0,%r3
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ mfspr %r3,SPR_HID0
+ isync
+ blr
+
+.global trigger_attn
+trigger_attn:
+ sync
+ isync
+ attn
+ blr
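
The six back-to-back mfspr reads in set_hid0 are deliberate: an update to HID0 on these processors must be preceded by a sync and followed by multiple dummy reads of HID0 and an isync before the new value is guaranteed to be in effect, so the routine encodes that whole sequence. A hedged usage sketch, assuming a hypothetical read accessor and a placeholder bit name:

	/* Sketch only: mfspr_hid0() and HID0_SOME_BIT are placeholders,
	 * not skiboot's actual names. */
	void set_hid0(unsigned long hid0);	/* the asm routine above */
	unsigned long mfspr_hid0(void);		/* hypothetical accessor */

	#define HID0_SOME_BIT	0x0000000000000001UL	/* placeholder bit */

	static void hid0_set_bit(void)
	{
		/* Read-modify-write through the safe update sequence. */
		set_hid0(mfspr_hid0() | HID0_SOME_BIT);
	}
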
diff --git a/ccan/Makefile.inc b/ccan/Makefile.inc
new file mode 100644
index 0000000..1a89b2e
--- /dev/null
+++ b/ccan/Makefile.inc
@@ -0,0 +1,8 @@
+# -*-Makefile-*-
+
+SUBDIRS += ccan ccan/list ccan/str
+CCAN_OBJS = list/list.o str/str.o
+CCAN=ccan/built-in.o
+
+$(CCAN): $(CCAN_OBJS:%=ccan/%)
+
diff --git a/ccan/array_size/LICENSE b/ccan/array_size/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/array_size/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+    i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+    ii. moral rights retained by the original author(s) and/or performer(s);
+    iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+    iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+    v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
+    vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+    vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+    a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+    b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+    c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/array_size/_info b/ccan/array_size/_info
new file mode 100644
index 0000000..d670042
--- /dev/null
+++ b/ccan/array_size/_info
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * array_size - routine for safely deriving the size of a visible array.
+ *
+ * This provides a simple ARRAY_SIZE() macro, which (given a good compiler)
+ * will also break the compile if you try to use it on a pointer.
+ *
+ * This can ensure your code is robust to changes, without needing a gratuitous
+ * macro or constant.
+ *
+ * Example:
+ * // Outputs "Initialized 32 values"
+ * #include <ccan/array_size/array_size.h>
+ * #include <stdlib.h>
+ * #include <stdio.h>
+ *
+ * // We currently use 32 random values.
+ * static unsigned int vals[32];
+ *
+ * int main(void)
+ * {
+ * unsigned int i;
+ * for (i = 0; i < ARRAY_SIZE(vals); i++)
+ * vals[i] = random();
+ * printf("Initialized %u values\n", i);
+ * return 0;
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ printf("ccan/build_assert\n");
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/array_size/array_size.h b/ccan/array_size/array_size.h
new file mode 100644
index 0000000..0ca422a
--- /dev/null
+++ b/ccan/array_size/array_size.h
@@ -0,0 +1,26 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_ARRAY_SIZE_H
+#define CCAN_ARRAY_SIZE_H
+#include "config.h"
+#include <ccan/build_assert/build_assert.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in a visible array
+ * @arr: the array whose size you want.
+ *
+ * This does not work on pointers, or arrays declared as [], or
+ * function parameters. With correct compiler support, such usage
+ * will cause a build error (see build_assert).
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + _array_size_chk(arr))
+
+#if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF
+/* Two gcc extensions.
+ * &a[0] degrades to a pointer: a different type from an array */
+#define _array_size_chk(arr) \
+ BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(arr), \
+ typeof(&(arr)[0])))
+#else
+#define _array_size_chk(arr) 0
+#endif
+#endif /* CCAN_ARRAY_SIZE_H */
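
The gcc branch above hinges on one fact: with typeof, an array and a pointer to its first element are distinct types, so __builtin_types_compatible_p can tell a real array from a decayed pointer, and ARRAY_SIZE on a pointer becomes a build error. A standalone illustration of that distinction (gcc/clang extensions, mirroring the header's check):

	#include <stdio.h>

	int main(void)
	{
		int arr[4];
		int *p = arr;

		/* int[4] vs int *: not compatible -> prints 0 */
		printf("%d\n", __builtin_types_compatible_p(typeof(arr),
							    typeof(&arr[0])));
		/* int * vs int *: compatible -> prints 1 */
		printf("%d\n", __builtin_types_compatible_p(typeof(p),
							    typeof(&p[0])));
		return 0;
	}
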
diff --git a/ccan/array_size/test/compile_fail-function-param.c b/ccan/array_size/test/compile_fail-function-param.c
new file mode 100644
index 0000000..cb64d98
--- /dev/null
+++ b/ccan/array_size/test/compile_fail-function-param.c
@@ -0,0 +1,24 @@
+#include <ccan/array_size/array_size.h>
+#include <stdlib.h>
+
+struct foo {
+ unsigned int a, b;
+};
+
+int check_parameter(const struct foo array[4]);
+int check_parameter(const struct foo array[4])
+{
+#ifdef FAIL
+ return (ARRAY_SIZE(array) == 4);
+#if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P
+#error "Unfortunately we don't fail if _array_size_chk is a noop."
+#endif
+#else
+ return sizeof(array) == 4 * sizeof(struct foo);
+#endif
+}
+
+int main(int argc, char *argv[])
+{
+ return check_parameter(NULL);
+}
diff --git a/ccan/array_size/test/compile_fail.c b/ccan/array_size/test/compile_fail.c
new file mode 100644
index 0000000..37d315f
--- /dev/null
+++ b/ccan/array_size/test/compile_fail.c
@@ -0,0 +1,14 @@
+#include <ccan/array_size/array_size.h>
+
+int main(int argc, char *argv[8])
+{
+ char array[100];
+#ifdef FAIL
+ return ARRAY_SIZE(argv) + ARRAY_SIZE(array);
+#if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P
+#error "Unfortunately we don't fail if _array_size_chk is a noop."
+#endif
+#else
+ return ARRAY_SIZE(array);
+#endif
+}
diff --git a/ccan/array_size/test/run.c b/ccan/array_size/test/run.c
new file mode 100644
index 0000000..37b4200
--- /dev/null
+++ b/ccan/array_size/test/run.c
@@ -0,0 +1,33 @@
+#include <ccan/array_size/array_size.h>
+#include <ccan/tap/tap.h>
+
+static char array1[1];
+static int array2[2];
+static unsigned long array3[3][5];
+struct foo {
+ unsigned int a, b;
+ char string[100];
+};
+static struct foo array4[4];
+
+/* Make sure they can be used in initializers. */
+static int array1_size = ARRAY_SIZE(array1);
+static int array2_size = ARRAY_SIZE(array2);
+static int array3_size = ARRAY_SIZE(array3);
+static int array4_size = ARRAY_SIZE(array4);
+
+int main(int argc, char *argv[])
+{
+ plan_tests(8);
+ ok1(array1_size == 1);
+ ok1(array2_size == 2);
+ ok1(array3_size == 3);
+ ok1(array4_size == 4);
+
+ ok1(ARRAY_SIZE(array1) == 1);
+ ok1(ARRAY_SIZE(array2) == 2);
+ ok1(ARRAY_SIZE(array3) == 3);
+ ok1(ARRAY_SIZE(array4) == 4);
+
+ return exit_status();
+}
diff --git a/ccan/build_assert/LICENSE b/ccan/build_assert/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/build_assert/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+    i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+    ii. moral rights retained by the original author(s) and/or performer(s);
+    iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+    iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+    v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
+    vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+    vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+    a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+    b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+    c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/build_assert/_info b/ccan/build_assert/_info
new file mode 100644
index 0000000..bce92b7
--- /dev/null
+++ b/ccan/build_assert/_info
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * build_assert - routines for build-time assertions
+ *
+ * This code provides routines which will cause compilation to fail should some
+ * assertion be untrue: such failures are preferable to run-time assertions,
+ * but much more limited since they can only depend on compile-time constants.
+ *
+ * These assertions are most useful when two parts of the code must be kept in
+ * sync: it is better to avoid such cases if possible, but second best is to
+ * detect invalid changes at build time.
+ *
+ * For example, a tricky piece of code might rely on a certain element being at
+ * the start of the structure. To ensure that future changes don't break it,
+ * you would catch such changes in your code like so:
+ *
+ * Example:
+ * #include <stddef.h>
+ * #include <ccan/build_assert/build_assert.h>
+ *
+ * struct foo {
+ * char string[5];
+ * int x;
+ * };
+ *
+ * static char *foo_string(struct foo *foo)
+ * {
+ * // This trick requires that the string be first in the structure
+ * BUILD_ASSERT(offsetof(struct foo, string) == 0);
+ * return (char *)foo;
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0)
+ /* Nothing. */
+ return 0;
+
+ return 1;
+}
diff --git a/ccan/build_assert/build_assert.h b/ccan/build_assert/build_assert.h
new file mode 100644
index 0000000..b9ecd84
--- /dev/null
+++ b/ccan/build_assert/build_assert.h
@@ -0,0 +1,40 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_BUILD_ASSERT_H
+#define CCAN_BUILD_ASSERT_H
+
+/**
+ * BUILD_ASSERT - assert a build-time dependency.
+ * @cond: the compile-time condition which must be true.
+ *
+ * Your compile will fail if the condition isn't true, or can't be evaluated
+ * by the compiler. This can only be used within a function.
+ *
+ * Example:
+ * #include <stddef.h>
+ * ...
+ * static char *foo_to_char(struct foo *foo)
+ * {
+ * // This code needs string to be at start of foo.
+ * BUILD_ASSERT(offsetof(struct foo, string) == 0);
+ * return (char *)foo;
+ * }
+ */
+#define BUILD_ASSERT(cond) \
+ do { (void) sizeof(char [1 - 2*!(cond)]); } while(0)
+
+/**
+ * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression.
+ * @cond: the compile-time condition which must be true.
+ *
+ * Your compile will fail if the condition isn't true, or can't be evaluated
+ * by the compiler. This can be used in an expression: its value is "0".
+ *
+ * Example:
+ * #define foo_to_char(foo) \
+ * ((char *)(foo) \
+ * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0))
+ */
+#define BUILD_ASSERT_OR_ZERO(cond) \
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+
+#endif /* CCAN_BUILD_ASSERT_H */
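
Both macros work by declaring a char array whose size is 1 when the condition holds and -1 when it fails, and a negative array size is a compile-time error; BUILD_ASSERT_OR_ZERO then subtracts 1 so a passing assertion contributes 0 to the surrounding expression. A small self-contained demonstration of that mechanism:

	#include <stddef.h>
	#include <ccan/build_assert/build_assert.h>

	struct foo { char string[5]; int x; };

	int main(void)
	{
		/* Holds, so this is sizeof(char[1]): compiles fine. */
		BUILD_ASSERT(offsetof(struct foo, string) == 0);
		/* BUILD_ASSERT(offsetof(struct foo, x) == 0);
		 * would be char[-1]: a build error. */
		return BUILD_ASSERT_OR_ZERO(sizeof(struct foo) > 0); /* 0 */
	}
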
diff --git a/ccan/build_assert/test/compile_fail-expr.c b/ccan/build_assert/test/compile_fail-expr.c
new file mode 100644
index 0000000..109215b
--- /dev/null
+++ b/ccan/build_assert/test/compile_fail-expr.c
@@ -0,0 +1,10 @@
+#include <ccan/build_assert/build_assert.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+ return BUILD_ASSERT_OR_ZERO(1 == 0);
+#else
+ return 0;
+#endif
+}
diff --git a/ccan/build_assert/test/compile_fail.c b/ccan/build_assert/test/compile_fail.c
new file mode 100644
index 0000000..37d95ed
--- /dev/null
+++ b/ccan/build_assert/test/compile_fail.c
@@ -0,0 +1,9 @@
+#include <ccan/build_assert/build_assert.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+ BUILD_ASSERT(1 == 0);
+#endif
+ return 0;
+}
diff --git a/ccan/build_assert/test/compile_ok.c b/ccan/build_assert/test/compile_ok.c
new file mode 100644
index 0000000..4105484
--- /dev/null
+++ b/ccan/build_assert/test/compile_ok.c
@@ -0,0 +1,7 @@
+#include <ccan/build_assert/build_assert.h>
+
+int main(int argc, char *argv[])
+{
+ BUILD_ASSERT(1 == 1);
+ return 0;
+}
diff --git a/ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c b/ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c
new file mode 100644
index 0000000..4185821
--- /dev/null
+++ b/ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c
@@ -0,0 +1,9 @@
+#include <ccan/build_assert/build_assert.h>
+#include <ccan/tap/tap.h>
+
+int main(int argc, char *argv[])
+{
+ plan_tests(1);
+ ok1(BUILD_ASSERT_OR_ZERO(1 == 1) == 0);
+ return exit_status();
+}
diff --git a/ccan/check_type/LICENSE b/ccan/check_type/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/check_type/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+    i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+    ii. moral rights retained by the original author(s) and/or performer(s);
+    iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+    iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+    v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
+    vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+    vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+    a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+    b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+    c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/check_type/_info b/ccan/check_type/_info
new file mode 100644
index 0000000..cb19e20
--- /dev/null
+++ b/ccan/check_type/_info
@@ -0,0 +1,33 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * check_type - routines for compile time type checking
+ *
+ * C has fairly weak typing: ints get automatically converted to longs, signed
+ * to unsigned, etc. There are some cases where this is best avoided, and
+ * these macros provide methods for evoking warnings (or build errors) when
+ * a precise type isn't used.
+ *
+ * On compilers which don't support typeof() these routines are less effective,
+ * since they have to use sizeof() which can only distinguish between types of
+ * different size.
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+#if !HAVE_TYPEOF
+ printf("ccan/build_assert\n");
+#endif
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/check_type/check_type.h b/ccan/check_type/check_type.h
new file mode 100644
index 0000000..77501a9
--- /dev/null
+++ b/ccan/check_type/check_type.h
@@ -0,0 +1,64 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_CHECK_TYPE_H
+#define CCAN_CHECK_TYPE_H
+#include "config.h"
+
+/**
+ * check_type - issue a warning or build failure if type is not correct.
+ * @expr: the expression whose type we should check (not evaluated).
+ * @type: the exact type we expect the expression to be.
+ *
+ * This macro is usually used within other macros to try to ensure that a macro
+ * argument is of the expected type. No type promotion of the expression is
+ * done: an unsigned int is not the same as an int!
+ *
+ * check_type() always evaluates to 0.
+ *
+ * If your compiler does not support typeof, then the best we can do is fail
+ * to compile if the sizes of the types are unequal (a less complete check).
+ *
+ * Example:
+ * // They should always pass a 64-bit value to _set_some_value!
+ * #define set_some_value(expr) \
+ * _set_some_value((check_type((expr), uint64_t), (expr)))
+ */
+
+/**
+ * check_types_match - issue a warning or build failure if types are not same.
+ * @expr1: the first expression (not evaluated).
+ * @expr2: the second expression (not evaluated).
+ *
+ * This macro is usually used within other macros to try to ensure that
+ * arguments are of identical types. No type promotion of the expressions is
+ * done: an unsigned int is not the same as an int!
+ *
+ * check_types_match() always evaluates to 0.
+ *
+ * If your compiler does not support typeof, then the best we can do is fail
+ * to compile if the sizes of the types are unequal (a less complete check).
+ *
+ * Example:
+ * // Do subtraction to get to enclosing type, but make sure that
+ * // pointer is of correct type for that member.
+ * #define container_of(mbr_ptr, encl_type, mbr) \
+ * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \
+ * ((encl_type *) \
+ *	  ((char *)(mbr_ptr) - offsetof(encl_type, mbr))))
+ */
+#if HAVE_TYPEOF
+#define check_type(expr, type) \
+ ((typeof(expr) *)0 != (type *)0)
+
+#define check_types_match(expr1, expr2) \
+ ((typeof(expr1) *)0 != (typeof(expr2) *)0)
+#else
+#include <ccan/build_assert/build_assert.h>
+/* Without typeof, we can only test the sizes. */
+#define check_type(expr, type) \
+ BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type))
+
+#define check_types_match(expr1, expr2) \
+ BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2))
+#endif /* HAVE_TYPEOF */
+
+#endif /* CCAN_CHECK_TYPE_H */
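
In the typeof branch, comparing two null pointers of different pointee types is what triggers the diagnostic, while the comparison itself is a compile-time constant 0 that can sit harmlessly inside an expression. A short standalone use, mirroring the header's own set_some_value example:

	#include <stdint.h>
	#include <ccan/check_type/check_type.h>

	static void _set_some_value(uint64_t v) { (void)v; }

	/* Reject callers that pass anything but an exact uint64_t. */
	#define set_some_value(expr) \
		_set_some_value((check_type((expr), uint64_t), (expr)))

	int main(void)
	{
		uint64_t v = 42;

		set_some_value(v);	/* fine: types match exactly */
		/* set_some_value(42);	-> diagnostic: int is not uint64_t */
		return 0;
	}
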
diff --git a/ccan/check_type/test/compile_fail-check_type.c b/ccan/check_type/test/compile_fail-check_type.c
new file mode 100644
index 0000000..fe7d6a2
--- /dev/null
+++ b/ccan/check_type/test/compile_fail-check_type.c
@@ -0,0 +1,9 @@
+#include <ccan/check_type/check_type.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+ check_type(argc, char);
+#endif
+ return 0;
+}
diff --git a/ccan/check_type/test/compile_fail-check_type_unsigned.c b/ccan/check_type/test/compile_fail-check_type_unsigned.c
new file mode 100644
index 0000000..574d4ae
--- /dev/null
+++ b/ccan/check_type/test/compile_fail-check_type_unsigned.c
@@ -0,0 +1,14 @@
+#include <ccan/check_type/check_type.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if HAVE_TYPEOF
+ check_type(argc, unsigned int);
+#else
+ /* This doesn't work without typeof, so just fail */
+#error "Fail without typeof"
+#endif
+#endif
+ return 0;
+}
diff --git a/ccan/check_type/test/compile_fail-check_types_match.c b/ccan/check_type/test/compile_fail-check_types_match.c
new file mode 100644
index 0000000..cbd6e9b
--- /dev/null
+++ b/ccan/check_type/test/compile_fail-check_types_match.c
@@ -0,0 +1,10 @@
+#include <ccan/check_type/check_type.h>
+
+int main(int argc, char *argv[])
+{
+ unsigned char x = argc;
+#ifdef FAIL
+ check_types_match(argc, x);
+#endif
+ return x;
+}
diff --git a/ccan/check_type/test/run.c b/ccan/check_type/test/run.c
new file mode 100644
index 0000000..83b903c
--- /dev/null
+++ b/ccan/check_type/test/run.c
@@ -0,0 +1,22 @@
+#include <ccan/check_type/check_type.h>
+#include <ccan/tap/tap.h>
+
+int main(int argc, char *argv[])
+{
+ int x = 0, y = 0;
+
+ plan_tests(9);
+
+ ok1(check_type(argc, int) == 0);
+ ok1(check_type(&argc, int *) == 0);
+ ok1(check_types_match(argc, argc) == 0);
+ ok1(check_types_match(argc, x) == 0);
+ ok1(check_types_match(&argc, &x) == 0);
+
+ ok1(check_type(x++, int) == 0);
+ ok(x == 0, "check_type does not evaluate expression");
+ ok1(check_types_match(x++, y++) == 0);
+ ok(x == 0 && y == 0, "check_types_match does not evaluate expressions");
+
+ return exit_status();
+}
diff --git a/ccan/container_of/LICENSE b/ccan/container_of/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/container_of/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+    i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+    ii. moral rights retained by the original author(s) and/or performer(s);
+    iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+    iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+    v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
+    vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+    vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+    a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+    b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+    c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/container_of/_info b/ccan/container_of/_info
new file mode 100644
index 0000000..77b3bd1
--- /dev/null
+++ b/ccan/container_of/_info
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * container_of - routine for upcasting
+ *
+ * It is often convenient to create code where the caller registers a pointer
+ * to a generic structure and a callback. The callback might know that the
+ * pointer points to within a larger structure, and container_of gives a
+ * convenient and fairly type-safe way of returning to the enclosing structure.
+ *
+ * This idiom is an alternative to providing a void * pointer for every
+ * callback.
+ *
+ * Example:
+ * #include <stdio.h>
+ * #include <ccan/container_of/container_of.h>
+ *
+ * struct timer {
+ * void *members;
+ * };
+ *
+ * struct info {
+ * int my_stuff;
+ * struct timer timer;
+ * };
+ *
+ * static void register_timer(struct timer *timer)
+ * {
+ * //...
+ * }
+ *
+ * static void my_timer_callback(struct timer *timer)
+ * {
+ * struct info *info = container_of(timer, struct info, timer);
+ *	printf("my_stuff is %d\n", info->my_stuff);
+ * }
+ *
+ * int main(void)
+ * {
+ * struct info info = { .my_stuff = 1 };
+ *
+ * register_timer(&info.timer);
+ * // ...
+ * return 0;
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ printf("ccan/check_type\n");
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/container_of/container_of.h b/ccan/container_of/container_of.h
new file mode 100644
index 0000000..2943b8f
--- /dev/null
+++ b/ccan/container_of/container_of.h
@@ -0,0 +1,109 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_CONTAINER_OF_H
+#define CCAN_CONTAINER_OF_H
+#include <stddef.h>
+
+#include "config.h"
+#include <ccan/check_type/check_type.h>
+
+/**
+ * container_of - get pointer to enclosing structure
+ * @member_ptr: pointer to the structure member
+ * @containing_type: the type this member is within
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does pointer
+ * subtraction to return the pointer to the enclosing type.
+ *
+ * Example:
+ * struct foo {
+ * int fielda, fieldb;
+ * // ...
+ * };
+ * struct info {
+ * int some_other_field;
+ * struct foo my_foo;
+ * };
+ *
+ * static struct info *foo_to_info(struct foo *foo)
+ * {
+ * return container_of(foo, struct info, my_foo);
+ * }
+ */
+#define container_of(member_ptr, containing_type, member) \
+ ((containing_type *) \
+ ((char *)(member_ptr) \
+ - container_off(containing_type, member)) \
+ + check_types_match(*(member_ptr), ((containing_type *)0)->member))
+
+/**
+ * container_off - get offset to enclosing structure
+ * @containing_type: the type this member is within
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does
+ * typechecking and figures out the offset to the enclosing type.
+ *
+ * Example:
+ * struct foo {
+ * int fielda, fieldb;
+ * // ...
+ * };
+ * struct info {
+ * int some_other_field;
+ * struct foo my_foo;
+ * };
+ *
+ * static struct info *foo_to_info(struct foo *foo)
+ * {
+ * size_t off = container_off(struct info, my_foo);
+ * return (void *)((char *)foo - off);
+ * }
+ */
+#define container_off(containing_type, member) \
+ offsetof(containing_type, member)
+
+/**
+ * container_of_var - get pointer to enclosing structure using a variable
+ * @member_ptr: pointer to the structure member
+ * @container_var: a pointer of same type as this member's container
+ * @member: the name of this member within the structure.
+ *
+ * Given a pointer to a member of a structure, this macro does pointer
+ * subtraction to return the pointer to the enclosing type.
+ *
+ * Example:
+ * static struct info *foo_to_i(struct foo *foo)
+ * {
+ * struct info *i = container_of_var(foo, i, my_foo);
+ * return i;
+ * }
+ */
+#if HAVE_TYPEOF
+#define container_of_var(member_ptr, container_var, member) \
+ container_of(member_ptr, typeof(*container_var), member)
+#else
+#define container_of_var(member_ptr, container_var, member) \
+ ((void *)((char *)(member_ptr) - \
+ container_off_var(container_var, member)))
+#endif
+
+/**
+ * container_off_var - get offset of a field in enclosing structure
+ * @container_var: a pointer to a container structure
+ * @member: the name of a member within the structure.
+ *
+ * Given any pointer to a structure and the name of one of its members,
+ * this macro does pointer subtraction to return the offset of that
+ * member within the structure's memory layout.
+ *
+ */
+#if HAVE_TYPEOF
+#define container_off_var(var, member) \
+ container_off(typeof(*var), member)
+#else
+#define container_off_var(var, member) \
+ ((char *)&(var)->member - (char *)(var))
+#endif
+
+#endif /* CCAN_CONTAINER_OF_H */
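
All of these variants reduce to the same arithmetic: take the member's address, subtract the member's offset within the containing type, and cast. Written out by hand for one concrete case, as a standalone sketch mirroring the header's example:

	#include <stddef.h>
	#include <stdio.h>

	struct info {
		int some_other_field;
		int my_foo;
	};

	int main(void)
	{
		struct info info;
		int *member = &info.my_foo;

		/* What container_of(member, struct info, my_foo) computes: */
		struct info *back = (struct info *)
			((char *)member - offsetof(struct info, my_foo));

		printf("%d\n", back == &info);	/* prints 1 */
		return 0;
	}
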
diff --git a/ccan/container_of/test/compile_fail-bad-type.c b/ccan/container_of/test/compile_fail-bad-type.c
new file mode 100644
index 0000000..b7a1459
--- /dev/null
+++ b/ccan/container_of/test/compile_fail-bad-type.c
@@ -0,0 +1,22 @@
+#include <ccan/container_of/container_of.h>
+#include <stdlib.h>
+
+struct foo {
+ int a;
+ char b;
+};
+
+int main(int argc, char *argv[])
+{
+ struct foo foo = { .a = 1, .b = 2 };
+ int *intp = &foo.a;
+ char *p;
+
+#ifdef FAIL
+ /* p is a char *, but this gives a struct foo * */
+ p = container_of(intp, struct foo, a);
+#else
+ p = (char *)intp;
+#endif
+ return p == NULL;
+}
diff --git a/ccan/container_of/test/compile_fail-types.c b/ccan/container_of/test/compile_fail-types.c
new file mode 100644
index 0000000..cae1c7a
--- /dev/null
+++ b/ccan/container_of/test/compile_fail-types.c
@@ -0,0 +1,22 @@
+#include <ccan/container_of/container_of.h>
+#include <stdlib.h>
+
+struct foo {
+ int a;
+ char b;
+};
+
+int main(int argc, char *argv[])
+{
+ struct foo foo = { .a = 1, .b = 2 }, *foop;
+ int *intp = &foo.a;
+
+#ifdef FAIL
+ /* b is a char, but intp is an int * */
+ foop = container_of(intp, struct foo, b);
+#else
+ foop = NULL;
+#endif
+ (void) foop; /* Suppress unused-but-set-variable warning. */
+ return intp == NULL;
+}
diff --git a/ccan/container_of/test/compile_fail-var-types.c b/ccan/container_of/test/compile_fail-var-types.c
new file mode 100644
index 0000000..f254d92
--- /dev/null
+++ b/ccan/container_of/test/compile_fail-var-types.c
@@ -0,0 +1,25 @@
+#include <ccan/container_of/container_of.h>
+#include <stdlib.h>
+
+struct foo {
+ int a;
+ char b;
+};
+
+int main(int argc, char *argv[])
+{
+ struct foo foo = { .a = 1, .b = 2 }, *foop;
+ int *intp = &foo.a;
+
+#ifdef FAIL
+ /* b is a char, but intp is an int * */
+ foop = container_of_var(intp, foop, b);
+#if !HAVE_TYPEOF
+#error "Unfortunately we don't fail if we don't have typeof."
+#endif
+#else
+ foop = NULL;
+#endif
+ (void) foop; /* Suppress unused-but-set-variable warning. */
+ return intp == NULL;
+}
diff --git a/ccan/container_of/test/run.c b/ccan/container_of/test/run.c
new file mode 100644
index 0000000..96ef483
--- /dev/null
+++ b/ccan/container_of/test/run.c
@@ -0,0 +1,24 @@
+#include <ccan/container_of/container_of.h>
+#include <ccan/tap/tap.h>
+
+struct foo {
+ int a;
+ char b;
+};
+
+int main(int argc, char *argv[])
+{
+ struct foo foo = { .a = 1, .b = 2 };
+ int *intp = &foo.a;
+ char *charp = &foo.b;
+
+ plan_tests(6);
+ ok1(container_of(intp, struct foo, a) == &foo);
+ ok1(container_of(charp, struct foo, b) == &foo);
+ ok1(container_of_var(intp, &foo, a) == &foo);
+ ok1(container_of_var(charp, &foo, b) == &foo);
+
+ ok1(container_off(struct foo, a) == 0);
+ ok1(container_off(struct foo, b) == offsetof(struct foo, b));
+ return exit_status();
+}
diff --git a/ccan/endian/LICENSE b/ccan/endian/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/endian/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+    i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+    ii. moral rights retained by the original author(s) and/or performer(s);
+    iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+    iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+    v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
+    vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+    vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+    a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+    b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+    c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/endian/_info b/ccan/endian/_info
new file mode 100644
index 0000000..753afa7
--- /dev/null
+++ b/ccan/endian/_info
@@ -0,0 +1,53 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * endian - endian conversion macros for simple types
+ *
+ * Portable protocols (such as on-disk formats, or network protocols)
+ * are often defined to be a particular endian: little-endian (least
+ * significant bytes first) or big-endian (most significant bytes
+ * first).
+ *
+ * Similarly, some CPUs lay out values in memory in little-endian
+ * order (most commonly, Intel's 8086 and derivatives), or big-endian
+ * order (almost everyone else).
+ *
+ * This module provides conversion routines, inspired by the linux kernel.
+ *
+ * Example:
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <err.h>
+ * #include <ccan/endian/endian.h>
+ *
+ * // Show a value in native, little-endian, big-endian and byte-reversed form.
+ * int main(int argc, char *argv[])
+ * {
+ * uint32_t value;
+ *
+ * if (argc != 2)
+ * errx(1, "Usage: %s <value>", argv[0]);
+ *
+ * value = atoi(argv[1]);
+ * printf("native: %08x\n", value);
+ * printf("little-endian: %08x\n", cpu_to_le32(value));
+ * printf("big-endian: %08x\n", cpu_to_be32(value));
+ * printf("byte-reversed: %08x\n", bswap_32(value));
+ * exit(0);
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0)
+ /* Nothing */
+ return 0;
+
+ return 1;
+}
diff --git a/ccan/endian/endian.h b/ccan/endian/endian.h
new file mode 100644
index 0000000..4dc58dd
--- /dev/null
+++ b/ccan/endian/endian.h
@@ -0,0 +1,314 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_ENDIAN_H
+#define CCAN_ENDIAN_H
+#include <stdint.h>
+#include "config.h"
+
+/**
+ * BSWAP_16 - reverse bytes in a constant uint16_t value.
+ * @val: constant value whose bytes to swap.
+ *
+ * Designed to be usable in constant-requiring initializers.
+ *
+ * Example:
+ * struct mystruct {
+ * char buf[BSWAP_16(0x1234)];
+ * };
+ */
+#define BSWAP_16(val) \
+ ((((uint16_t)(val) & 0x00ff) << 8) \
+ | (((uint16_t)(val) & 0xff00) >> 8))
+
+/**
+ * BSWAP_32 - reverse bytes in a constant uint32_t value.
+ * @val: constant value whose bytes to swap.
+ *
+ * Designed to be usable in constant-requiring initializers.
+ *
+ * Example:
+ * struct mystruct {
+ * char buf[BSWAP_32(0xff000000)];
+ * };
+ */
+#define BSWAP_32(val) \
+ ((((uint32_t)(val) & 0x000000ff) << 24) \
+ | (((uint32_t)(val) & 0x0000ff00) << 8) \
+ | (((uint32_t)(val) & 0x00ff0000) >> 8) \
+ | (((uint32_t)(val) & 0xff000000) >> 24))
+
+/**
+ * BSWAP_64 - reverse bytes in a constant uint64_t value.
+ * @val: constant value whose bytes to swap.
+ *
+ * Designed to be usable in constant-requiring initializers.
+ *
+ * Example:
+ * struct mystruct {
+ * char buf[BSWAP_64(0xff00000000000000ULL)];
+ * };
+ */
+#define BSWAP_64(val) \
+ ((((uint64_t)(val) & 0x00000000000000ffULL) << 56) \
+ | (((uint64_t)(val) & 0x000000000000ff00ULL) << 40) \
+ | (((uint64_t)(val) & 0x0000000000ff0000ULL) << 24) \
+ | (((uint64_t)(val) & 0x00000000ff000000ULL) << 8) \
+ | (((uint64_t)(val) & 0x000000ff00000000ULL) >> 8) \
+ | (((uint64_t)(val) & 0x0000ff0000000000ULL) >> 24) \
+ | (((uint64_t)(val) & 0x00ff000000000000ULL) >> 40) \
+ | (((uint64_t)(val) & 0xff00000000000000ULL) >> 56))
+
+#if HAVE_BYTESWAP_H
+#include <byteswap.h>
+#else
+/**
+ * bswap_16 - reverse bytes in a uint16_t value.
+ * @val: value whose bytes to swap.
+ *
+ * Example:
+ * // Output contains "1024 is 4 as two bytes reversed"
+ * printf("1024 is %u as two bytes reversed\n", bswap_16(1024));
+ */
+static inline uint16_t bswap_16(uint16_t val)
+{
+ return BSWAP_16(val);
+}
+
+/**
+ * bswap_32 - reverse bytes in a uint32_t value.
+ * @val: value whose bytes to swap.
+ *
+ * Example:
+ * // Output contains "1024 is 262144 as four bytes reversed"
+ * printf("1024 is %u as four bytes reversed\n", bswap_32(1024));
+ */
+static inline uint32_t bswap_32(uint32_t val)
+{
+ return BSWAP_32(val);
+}
+#endif /* !HAVE_BYTESWAP_H */
+
+#if !HAVE_BSWAP_64
+/**
+ * bswap_64 - reverse bytes in a uint64_t value.
+ * @val: value whose bytes to swap.
+ *
+ * Example:
+ * // Output contains "1024 is 1125899906842624 as eight bytes reversed"
+ * printf("1024 is %llu as eight bytes reversed\n",
+ * (unsigned long long)bswap_64(1024));
+ */
+static inline uint64_t bswap_64(uint64_t val)
+{
+ return BSWAP_64(val);
+}
+#endif
+
+/* Sanity check the defines. We don't handle weird endianness. */
+#if !HAVE_LITTLE_ENDIAN && !HAVE_BIG_ENDIAN
+#error "Unknown endian"
+#elif HAVE_LITTLE_ENDIAN && HAVE_BIG_ENDIAN
+#error "Can't compile for both big and little endian."
+#endif
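+
+/*
+ * Note (assumption): the ccan-style config.h included above is expected to
+ * define both HAVE_LITTLE_ENDIAN and HAVE_BIG_ENDIAN, exactly one of them
+ * to 1, e.g.:
+ *	#define HAVE_LITTLE_ENDIAN 0
+ *	#define HAVE_BIG_ENDIAN 1
+ */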
+
+#if HAVE_LITTLE_ENDIAN
+/**
+ * CPU_TO_LE64 - convert a constant uint64_t value to little-endian
+ * @native: constant to convert
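+ *
+ * Example (a sketch; the name is illustrative):
+ *	static const uint64_t wire_magic = CPU_TO_LE64(0x0011223344556677ULL);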
+ */
+#define CPU_TO_LE64(native) (native)
+
+/**
+ * CPU_TO_LE32 - convert a constant uint32_t value to little-endian
+ * @native: constant to convert
+ */
+#define CPU_TO_LE32(native) (native)
+
+/**
+ * CPU_TO_LE16 - convert a constant uint16_t value to little-endian
+ * @native: constant to convert
+ */
+#define CPU_TO_LE16(native) (native)
+
+/**
+ * LE64_TO_CPU - convert a little-endian uint64_t constant
+ * @le_val: little-endian constant to convert
+ */
+#define LE64_TO_CPU(le_val) (le_val)
+
+/**
+ * LE32_TO_CPU - convert a little-endian uint32_t constant
+ * @le_val: little-endian constant to convert
+ */
+#define LE32_TO_CPU(le_val) (le_val)
+
+/**
+ * LE16_TO_CPU - convert a little-endian uint16_t constant
+ * @le_val: little-endian constant to convert
+ */
+#define LE16_TO_CPU(le_val) (le_val)
+
+#else /* ... HAVE_BIG_ENDIAN */
+#define CPU_TO_LE64(native) BSWAP_64(native)
+#define CPU_TO_LE32(native) BSWAP_32(native)
+#define CPU_TO_LE16(native) BSWAP_16(native)
+#define LE64_TO_CPU(le_val) BSWAP_64(le_val)
+#define LE32_TO_CPU(le_val) BSWAP_32(le_val)
+#define LE16_TO_CPU(le_val) BSWAP_16(le_val)
+#endif /* HAVE_BIG_ENDIAN */
+
+#if HAVE_BIG_ENDIAN
+/**
+ * CPU_TO_BE64 - convert a constant uint64_t value to big-endian
+ * @native: constant to convert
+ */
+#define CPU_TO_BE64(native) (native)
+
+/**
+ * CPU_TO_BE32 - convert a constant uint32_t value to big-endian
+ * @native: constant to convert
+ */
+#define CPU_TO_BE32(native) (native)
+
+/**
+ * CPU_TO_BE16 - convert a constant uint16_t value to big-endian
+ * @native: constant to convert
+ */
+#define CPU_TO_BE16(native) (native)
+
+/**
+ * BE64_TO_CPU - convert a big-endian uint64_t constant
+ * @be_val: big-endian constant to convert
+ */
+#define BE64_TO_CPU(be_val) (be_val)
+
+/**
+ * BE32_TO_CPU - convert a big-endian uint32_t constant
+ * @be_val: big-endian constant to convert
+ */
+#define BE32_TO_CPU(be_val) (be_val)
+
+/**
+ * BE16_TO_CPU - convert a big-endian uint16_t constant
+ * @be_val: big-endian constant to convert
+ */
+#define BE16_TO_CPU(be_val) (be_val)
+
+#else /* ... HAVE_LITTLE_ENDIAN */
+#define CPU_TO_BE64(native) BSWAP_64(native)
+#define CPU_TO_BE32(native) BSWAP_32(native)
+#define CPU_TO_BE16(native) BSWAP_16(native)
+#define BE64_TO_CPU(be_val) BSWAP_64(be_val)
+#define BE32_TO_CPU(be_val) BSWAP_32(be_val)
+#define BE16_TO_CPU(be_val) BSWAP_16(be_val)
+#endif /* HAVE_LITTLE_ENDIAN */
+
+
+/**
+ * cpu_to_le64 - convert a uint64_t value to little-endian
+ * @native: value to convert
+ */
+static inline uint64_t cpu_to_le64(uint64_t native)
+{
+ return CPU_TO_LE64(native);
+}
+
+/**
+ * cpu_to_le32 - convert a uint32_t value to little-endian
+ * @native: value to convert
+ */
+static inline uint32_t cpu_to_le32(uint32_t native)
+{
+ return CPU_TO_LE32(native);
+}
+
+/**
+ * cpu_to_le16 - convert a uint16_t value to little-endian
+ * @native: value to convert
+ */
+static inline uint16_t cpu_to_le16(uint16_t native)
+{
+ return CPU_TO_LE16(native);
+}
+
+/**
+ * le64_to_cpu - convert a little-endian uint64_t value
+ * @le_val: little-endian value to convert
+ */
+static inline uint64_t le64_to_cpu(uint64_t le_val)
+{
+ return LE64_TO_CPU(le_val);
+}
+
+/**
+ * le32_to_cpu - convert a little-endian uint32_t value
+ * @le_val: little-endian value to convert
+ */
+static inline uint32_t le32_to_cpu(uint32_t le_val)
+{
+ return LE32_TO_CPU(le_val);
+}
+
+/**
+ * le16_to_cpu - convert a little-endian uint16_t value
+ * @le_val: little-endian value to convert
+ */
+static inline uint16_t le16_to_cpu(uint16_t le_val)
+{
+ return LE16_TO_CPU(le_val);
+}
+
+/**
+ * cpu_to_be64 - convert a uint64_t value to big endian.
+ * @native: value to convert
+ */
+static inline uint64_t cpu_to_be64(uint64_t native)
+{
+ return CPU_TO_BE64(native);
+}
+
+/**
+ * cpu_to_be32 - convert a uint32_t value to big endian.
+ * @native: value to convert
+ */
+static inline uint32_t cpu_to_be32(uint32_t native)
+{
+ return CPU_TO_BE32(native);
+}
+
+/**
+ * cpu_to_be16 - convert a uint16_t value to big endian.
+ * @native: value to convert
+ */
+static inline uint16_t cpu_to_be16(uint16_t native)
+{
+ return CPU_TO_BE16(native);
+}
+
+/**
+ * be64_to_cpu - convert a big-endian uint64_t value
+ * @be_val: big-endian value to convert
+ */
+static inline uint64_t be64_to_cpu(uint64_t be_val)
+{
+ return BE64_TO_CPU(be_val);
+}
+
+/**
+ * be32_to_cpu - convert a big-endian uint32_t value
+ * @be_val: big-endian value to convert
+ */
+static inline uint32_t be32_to_cpu(uint32_t be_val)
+{
+ return BE32_TO_CPU(be_val);
+}
+
+/**
+ * be16_to_cpu - convert a big-endian uint16_t value
+ * @be_val: big-endian value to convert
+ */
+static inline uint16_t be16_to_cpu(uint16_t be_val)
+{
+ return BE16_TO_CPU(be_val);
+}
+
+#endif /* CCAN_ENDIAN_H */
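
As a usage sketch (the struct and values here are illustrative assumptions,
not part of the module): a fixed-format header whose fields are big-endian
on the wire can be packed and unpacked with the helpers above.

    #include <stdio.h>
    #include <ccan/endian/endian.h>

    /* Hypothetical wire header; both fields are stored big-endian. */
    struct msg_hdr {
            uint32_t magic;
            uint16_t version;
    };

    int main(void)
    {
            struct msg_hdr hdr;

            hdr.magic = cpu_to_be32(0xfeedf00d);    /* native -> wire */
            hdr.version = cpu_to_be16(1);

            /* Convert back to native order before use. */
            printf("magic %08x version %u\n",
                   be32_to_cpu(hdr.magic), be16_to_cpu(hdr.version));
            return 0;
    }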
diff --git a/ccan/endian/test/compile_ok-constant.c b/ccan/endian/test/compile_ok-constant.c
new file mode 100644
index 0000000..1aef1dd
--- /dev/null
+++ b/ccan/endian/test/compile_ok-constant.c
@@ -0,0 +1,12 @@
+#include <ccan/endian/endian.h>
+
+struct foo {
+ char one[BSWAP_16(0xFF00)];
+ char two[BSWAP_32(0xFF000000)];
+ char three[BSWAP_64(0xFF00000000000000ULL)];
+};
+
+int main(void)
+{
+ return 0;
+}
diff --git a/ccan/endian/test/run.c b/ccan/endian/test/run.c
new file mode 100644
index 0000000..a00fce7
--- /dev/null
+++ b/ccan/endian/test/run.c
@@ -0,0 +1,106 @@
+#include <ccan/endian/endian.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <ccan/tap/tap.h>
+
+int main(int argc, char *argv[])
+{
+ union {
+ uint64_t u64;
+ unsigned char u64_bytes[8];
+ } u64;
+ union {
+ uint32_t u32;
+ unsigned char u32_bytes[4];
+ } u32;
+ union {
+ uint16_t u16;
+ unsigned char u16_bytes[2];
+ } u16;
+
+ plan_tests(48);
+
+ /* Straight swap tests. */
+ u64.u64_bytes[0] = 0x00;
+ u64.u64_bytes[1] = 0x11;
+ u64.u64_bytes[2] = 0x22;
+ u64.u64_bytes[3] = 0x33;
+ u64.u64_bytes[4] = 0x44;
+ u64.u64_bytes[5] = 0x55;
+ u64.u64_bytes[6] = 0x66;
+ u64.u64_bytes[7] = 0x77;
+ u64.u64 = bswap_64(u64.u64);
+ ok1(u64.u64_bytes[7] == 0x00);
+ ok1(u64.u64_bytes[6] == 0x11);
+ ok1(u64.u64_bytes[5] == 0x22);
+ ok1(u64.u64_bytes[4] == 0x33);
+ ok1(u64.u64_bytes[3] == 0x44);
+ ok1(u64.u64_bytes[2] == 0x55);
+ ok1(u64.u64_bytes[1] == 0x66);
+ ok1(u64.u64_bytes[0] == 0x77);
+
+ u32.u32_bytes[0] = 0x00;
+ u32.u32_bytes[1] = 0x11;
+ u32.u32_bytes[2] = 0x22;
+ u32.u32_bytes[3] = 0x33;
+ u32.u32 = bswap_32(u32.u32);
+ ok1(u32.u32_bytes[3] == 0x00);
+ ok1(u32.u32_bytes[2] == 0x11);
+ ok1(u32.u32_bytes[1] == 0x22);
+ ok1(u32.u32_bytes[0] == 0x33);
+
+ u16.u16_bytes[0] = 0x00;
+ u16.u16_bytes[1] = 0x11;
+ u16.u16 = bswap_16(u16.u16);
+ ok1(u16.u16_bytes[1] == 0x00);
+ ok1(u16.u16_bytes[0] == 0x11);
+
+ /* Endian tests. */
+ u64.u64 = cpu_to_le64(0x0011223344556677ULL);
+ ok1(u64.u64_bytes[0] == 0x77);
+ ok1(u64.u64_bytes[1] == 0x66);
+ ok1(u64.u64_bytes[2] == 0x55);
+ ok1(u64.u64_bytes[3] == 0x44);
+ ok1(u64.u64_bytes[4] == 0x33);
+ ok1(u64.u64_bytes[5] == 0x22);
+ ok1(u64.u64_bytes[6] == 0x11);
+ ok1(u64.u64_bytes[7] == 0x00);
+ ok1(le64_to_cpu(u64.u64) == 0x0011223344556677ULL);
+
+ u64.u64 = cpu_to_be64(0x0011223344556677ULL);
+ ok1(u64.u64_bytes[7] == 0x77);
+ ok1(u64.u64_bytes[6] == 0x66);
+ ok1(u64.u64_bytes[5] == 0x55);
+ ok1(u64.u64_bytes[4] == 0x44);
+ ok1(u64.u64_bytes[3] == 0x33);
+ ok1(u64.u64_bytes[2] == 0x22);
+ ok1(u64.u64_bytes[1] == 0x11);
+ ok1(u64.u64_bytes[0] == 0x00);
+ ok1(be64_to_cpu(u64.u64) == 0x0011223344556677ULL);
+
+ u32.u32 = cpu_to_le32(0x00112233);
+ ok1(u32.u32_bytes[0] == 0x33);
+ ok1(u32.u32_bytes[1] == 0x22);
+ ok1(u32.u32_bytes[2] == 0x11);
+ ok1(u32.u32_bytes[3] == 0x00);
+ ok1(le32_to_cpu(u32.u32) == 0x00112233);
+
+ u32.u32 = cpu_to_be32(0x00112233);
+ ok1(u32.u32_bytes[3] == 0x33);
+ ok1(u32.u32_bytes[2] == 0x22);
+ ok1(u32.u32_bytes[1] == 0x11);
+ ok1(u32.u32_bytes[0] == 0x00);
+ ok1(be32_to_cpu(u32.u32) == 0x00112233);
+
+ u16.u16 = cpu_to_le16(0x0011);
+ ok1(u16.u16_bytes[0] == 0x11);
+ ok1(u16.u16_bytes[1] == 0x00);
+ ok1(le16_to_cpu(u16.u16) == 0x0011);
+
+ u16.u16 = cpu_to_be16(0x0011);
+ ok1(u16.u16_bytes[1] == 0x11);
+ ok1(u16.u16_bytes[0] == 0x00);
+ ok1(be16_to_cpu(u16.u16) == 0x0011);
+
+ exit(exit_status());
+}
diff --git a/ccan/list/LICENSE b/ccan/list/LICENSE
new file mode 100644
index 0000000..89de354
--- /dev/null
+++ b/ccan/list/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/ccan/list/_info b/ccan/list/_info
new file mode 100644
index 0000000..41a81fb
--- /dev/null
+++ b/ccan/list/_info
@@ -0,0 +1,70 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * list - double linked list routines
+ *
+ * The list header contains routines for manipulating double linked lists.
+ * It defines two types: struct list_head used for anchoring lists, and
+ * struct list_node which is usually embedded in the structure which is placed
+ * in the list.
+ *
+ * Example:
+ * #include <err.h>
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <ccan/list/list.h>
+ *
+ * struct parent {
+ * const char *name;
+ * struct list_head children;
+ * unsigned int num_children;
+ * };
+ *
+ * struct child {
+ * const char *name;
+ * struct list_node list;
+ * };
+ *
+ * int main(int argc, char *argv[])
+ * {
+ * struct parent p;
+ * struct child *c;
+ * unsigned int i;
+ *
+ * if (argc < 2)
+ * errx(1, "Usage: %s parent children...", argv[0]);
+ *
+ * p.name = argv[1];
+ * list_head_init(&p.children);
+ * p.num_children = 0;
+ * for (i = 2; i < argc; i++) {
+ * c = malloc(sizeof(*c));
+ * c->name = argv[i];
+ * list_add(&p.children, &c->list);
+ * p.num_children++;
+ * }
+ *
+ * printf("%s has %u children:", p.name, p.num_children);
+ * list_for_each(&p.children, c, list)
+ * printf("%s ", c->name);
+ * printf("\n");
+ * return 0;
+ * }
+ *
+ * License: BSD-MIT
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ printf("ccan/container_of\n");
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/list/list.c b/ccan/list/list.c
new file mode 100644
index 0000000..2717fa3
--- /dev/null
+++ b/ccan/list/list.c
@@ -0,0 +1,43 @@
+/* Licensed under BSD-MIT - see LICENSE file for details */
+#include <stdio.h>
+#include <stdlib.h>
+#include "list.h"
+
+static void *corrupt(const char *abortstr,
+ const struct list_node *head,
+ const struct list_node *node,
+ unsigned int count)
+{
+ if (abortstr) {
+ fprintf(stderr,
+ "%s: prev corrupt in node %p (%u) of %p\n",
+ abortstr, node, count, head);
+ abort();
+ }
+ return NULL;
+}
+
+struct list_node *list_check_node(const struct list_node *node,
+ const char *abortstr)
+{
+ const struct list_node *p, *n;
+ int count = 0;
+
+ for (p = node, n = node->next; n != node; p = n, n = n->next) {
+ count++;
+ if (n->prev != p)
+ return corrupt(abortstr, node, n, count);
+ }
+ /* Check prev on head node. */
+ if (node->prev != p)
+ return corrupt(abortstr, node, node, 0);
+
+ return (struct list_node *)node;
+}
+
+struct list_head *list_check(const struct list_head *h, const char *abortstr)
+{
+ if (!list_check_node(&h->n, abortstr))
+ return NULL;
+ return (struct list_head *)h;
+}
diff --git a/ccan/list/list.h b/ccan/list/list.h
new file mode 100644
index 0000000..dac63ad
--- /dev/null
+++ b/ccan/list/list.h
@@ -0,0 +1,492 @@
+/* Licensed under BSD-MIT - see LICENSE file for details */
+#ifndef CCAN_LIST_H
+#define CCAN_LIST_H
+#include <stdbool.h>
+#include <assert.h>
+#include <ccan/container_of/container_of.h>
+#include <ccan/check_type/check_type.h>
+
+/**
+ * struct list_node - an entry in a doubly-linked list
+ * @next: next entry (self if empty)
+ * @prev: previous entry (self if empty)
+ *
+ * This is used as an entry in a linked list.
+ * Example:
+ * struct child {
+ * const char *name;
+ * // Linked list of all us children.
+ * struct list_node list;
+ * };
+ */
+struct list_node
+{
+ struct list_node *next, *prev;
+};
+
+/**
+ * struct list_head - the head of a doubly-linked list
+ * @h: the list_head (containing next and prev pointers)
+ *
+ * This is used as the head of a linked list.
+ * Example:
+ * struct parent {
+ * const char *name;
+ * struct list_head children;
+ * unsigned int num_children;
+ * };
+ */
+struct list_head
+{
+ struct list_node n;
+};
+
+/**
+ * list_check - check head of a list for consistency
+ * @h: the list_head
+ * @abortstr: the location to print on aborting, or NULL.
+ *
+ * Because list_nodes have redundant information, consistency checking between
+ * the back and forward links can be done. This is useful as a debugging check.
+ * If @abortstr is non-NULL, that will be printed in a diagnostic if the list
+ * is inconsistent, and the function will abort.
+ *
+ * Returns the list head if the list is consistent, NULL if not (it
+ * can never return NULL if @abortstr is set).
+ *
+ * See also: list_check_node()
+ *
+ * Example:
+ * static void dump_parent(struct parent *p)
+ * {
+ * struct child *c;
+ *
+ * printf("%s (%u children):\n", p->name, p->num_children);
+ * list_check(&p->children, "bad child list");
+ * list_for_each(&p->children, c, list)
+ * printf(" -> %s\n", c->name);
+ * }
+ */
+struct list_head *list_check(const struct list_head *h, const char *abortstr);
+
+/**
+ * list_check_node - check node of a list for consistency
+ * @n: the list_node
+ * @abortstr: the location to print on aborting, or NULL.
+ *
+ * Check the consistency of the list that the node is in (it must be in one).
+ *
+ * See also: list_check()
+ *
+ * Example:
+ * static void dump_child(const struct child *c)
+ * {
+ * list_check_node(&c->list, "bad child list");
+ * printf("%s\n", c->name);
+ * }
+ */
+struct list_node *list_check_node(const struct list_node *n,
+ const char *abortstr);
+
+#ifdef CCAN_LIST_DEBUG
+#define list_debug(h) list_check((h), __func__)
+#define list_debug_node(n) list_check_node((n), __func__)
+#else
+#define list_debug(h) (h)
+#define list_debug_node(n) (n)
+#endif
+
+/**
+ * LIST_HEAD_INIT - initializer for an empty list_head
+ * @name: the name of the list.
+ *
+ * Explicit initializer for an empty list.
+ *
+ * See also:
+ * LIST_HEAD, list_head_init()
+ *
+ * Example:
+ * static struct list_head my_list = LIST_HEAD_INIT(my_list);
+ */
+#define LIST_HEAD_INIT(name) { { &name.n, &name.n } }
+
+/**
+ * LIST_HEAD - define and initialize an empty list_head
+ * @name: the name of the list.
+ *
+ * The LIST_HEAD macro defines a list_head and initializes it to an empty
+ * list. It can be prepended by "static" to define a static list_head.
+ *
+ * See also:
+ * LIST_HEAD_INIT, list_head_init()
+ *
+ * Example:
+ * static LIST_HEAD(my_global_list);
+ */
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+/**
+ * list_head_init - initialize a list_head
+ * @h: the list_head to set to the empty list
+ *
+ * Example:
+ * ...
+ * struct parent *parent = malloc(sizeof(*parent));
+ *
+ * list_head_init(&parent->children);
+ * parent->num_children = 0;
+ */
+static inline void list_head_init(struct list_head *h)
+{
+ h->n.next = h->n.prev = &h->n;
+}
+
+/**
+ * list_add - add an entry at the start of a linked list.
+ * @h: the list_head to add the node to
+ * @n: the list_node to add to the list.
+ *
+ * The list_node does not need to be initialized; it will be overwritten.
+ * Example:
+ * struct child *child = malloc(sizeof(*child));
+ *
+ * child->name = "marvin";
+ * list_add(&parent->children, &child->list);
+ * parent->num_children++;
+ */
+static inline void list_add(struct list_head *h, struct list_node *n)
+{
+ n->next = h->n.next;
+ n->prev = &h->n;
+ h->n.next->prev = n;
+ h->n.next = n;
+ (void)list_debug(h);
+}
+
+/**
+ * list_add_tail - add an entry at the end of a linked list.
+ * @h: the list_head to add the node to
+ * @n: the list_node to add to the list.
+ *
+ * The list_node does not need to be initialized; it will be overwritten.
+ * Example:
+ * list_add_tail(&parent->children, &child->list);
+ * parent->num_children++;
+ */
+static inline void list_add_tail(struct list_head *h, struct list_node *n)
+{
+ n->next = &h->n;
+ n->prev = h->n.prev;
+ h->n.prev->next = n;
+ h->n.prev = n;
+ (void)list_debug(h);
+}
+
+/**
+ * list_empty - is a list empty?
+ * @h: the list_head
+ *
+ * If the list is empty, returns true.
+ *
+ * Example:
+ * assert(list_empty(&parent->children) == (parent->num_children == 0));
+ */
+static inline bool list_empty(const struct list_head *h)
+{
+ (void)list_debug(h);
+ return h->n.next == &h->n;
+}
+
+/**
+ * list_del - delete an entry from an (unknown) linked list.
+ * @n: the list_node to delete from the list.
+ *
+ * Note that this leaves @n in an undefined state; it can be added to
+ * another list, but not deleted again.
+ *
+ * See also:
+ * list_del_from()
+ *
+ * Example:
+ * list_del(&child->list);
+ * parent->num_children--;
+ */
+static inline void list_del(struct list_node *n)
+{
+ (void)list_debug_node(n);
+ n->next->prev = n->prev;
+ n->prev->next = n->next;
+#ifdef CCAN_LIST_DEBUG
+ /* Catch use-after-del. */
+ n->next = n->prev = NULL;
+#endif
+}
+
+/**
+ * list_del_from - delete an entry from a known linked list.
+ * @h: the list_head the node is in.
+ * @n: the list_node to delete from the list.
+ *
+ * This explicitly indicates which list a node is expected to be in,
+ * which is better documentation and can catch more bugs.
+ *
+ * See also: list_del()
+ *
+ * Example:
+ * list_del_from(&parent->children, &child->list);
+ * parent->num_children--;
+ */
+static inline void list_del_from(struct list_head *h, struct list_node *n)
+{
+#ifdef CCAN_LIST_DEBUG
+ {
+ /* Thorough check: make sure it was in list! */
+ struct list_node *i;
+ for (i = h->n.next; i != n; i = i->next)
+ assert(i != &h->n);
+ }
+#endif /* CCAN_LIST_DEBUG */
+
+ /* Quick test that catches a surprising number of bugs. */
+ assert(!list_empty(h));
+ list_del(n);
+}
+
+/**
+ * list_entry - convert a list_node back into the structure containing it.
+ * @n: the list_node
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * Example:
+ * // First list entry is children.next; convert back to child.
+ * child = list_entry(parent->children.n.next, struct child, list);
+ *
+ * See Also:
+ * list_top(), list_for_each()
+ */
+#define list_entry(n, type, member) container_of(n, type, member)
+
+/**
+ * list_top - get the first entry in a list
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example:
+ * struct child *first;
+ * first = list_top(&parent->children, struct child, list);
+ * if (!first)
+ * printf("Empty list!\n");
+ */
+#define list_top(h, type, member) \
+ ((type *)list_top_((h), list_off_(type, member)))
+
+static inline const void *list_top_(const struct list_head *h, size_t off)
+{
+ if (list_empty(h))
+ return NULL;
+ return (const char *)h->n.next - off;
+}
+
+/**
+ * list_pop - get the first entry in a list and dequeue it
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
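+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example (a sketch mirroring list_top() above):
+ *	struct child *oldest = list_pop(&parent->children, struct child, list);
+ *	if (oldest)
+ *		parent->num_children--;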
+ */
+#define list_pop(h, type, member) \
+ ((type *)list_pop_((h), list_off_(type, member)))
+
+static inline const void *list_pop_(struct list_head *h, size_t off)
+{
+ struct list_node *n;
+
+ if (list_empty(h))
+ return NULL;
+ n = h->n.next;
+ list_del(n);
+ return (const char *)n - off;
+}
+
+/**
+ * list_tail - get the last entry in a list
+ * @h: the list_head
+ * @type: the type of the entry
+ * @member: the list_node member of the type
+ *
+ * If the list is empty, returns NULL.
+ *
+ * Example:
+ * struct child *last;
+ * last = list_tail(&parent->children, struct child, list);
+ * if (!last)
+ * printf("Empty list!\n");
+ */
+#define list_tail(h, type, member) \
+ ((type *)list_tail_((h), list_off_(type, member)))
+
+static inline const void *list_tail_(const struct list_head *h, size_t off)
+{
+ if (list_empty(h))
+ return NULL;
+ return (const char *)h->n.prev - off;
+}
+
+/**
+ * list_for_each - iterate through a list.
+ * @h: the list_head (warning: evaluated multiple times!)
+ * @i: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal.
+ *
+ * Example:
+ * list_for_each(&parent->children, child, list)
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each(h, i, member) \
+ list_for_each_off(h, i, list_off_var_(i, member))
+
+/**
+ * list_for_each_rev - iterate through a list backwards.
+ * @h: the list_head
+ * @i: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal.
+ *
+ * Example:
+ * list_for_each_rev(&parent->children, child, list)
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_rev(h, i, member) \
+ for (i = container_of_var(list_debug(h)->n.prev, i, member); \
+ &i->member != &(h)->n; \
+ i = container_of_var(i->member.prev, i, member))
+
+/**
+ * list_for_each_safe - iterate through a list, maybe during deletion
+ * @h: the list_head
+ * @i: the structure containing the list_node
+ * @nxt: the structure containing the list_node
+ * @member: the list_node member of the structure
+ *
+ * This is a convenient wrapper to iterate @i over the entire list. It's
+ * a for loop, so you can break and continue as normal. The extra variable
+ * @nxt is used to hold the next element, so you can delete @i from the list.
+ *
+ * Example:
+ * struct child *next;
+ * list_for_each_safe(&parent->children, child, next, list) {
+ * list_del(&child->list);
+ * parent->num_children--;
+ * }
+ */
+#define list_for_each_safe(h, i, nxt, member) \
+ list_for_each_safe_off(h, i, nxt, list_off_var_(i, member))
+
+/**
+ * list_for_each_off - iterate through a list of memory regions.
+ * @h: the list_head
+ * @i: the pointer to a memory region which contains the list node data.
+ * @off: offset (relative to @i) at which the list node data resides.
+ *
+ * This is a low-level wrapper to iterate @i over the entire list, used to
+ * implement all the other, higher-level for-each constructs. It's a for
+ * loop, so you can break and continue as normal.
+ *
+ * WARNING! Being the low-level macro that it is, this wrapper neither knows
+ * nor cares about the type of @i. The only assumption made is that @i points
+ * to a chunk of memory that, at offset @off relative to @i, contains a
+ * properly filled `struct list_node' which in turn contains pointers to
+ * further memory chunks, and it's turtles all the way down. With all that in
+ * mind, remember that given the wrong pointer/offset pair this macro will
+ * happily churn through all your memory until a SEGFAULT stops it; in other
+ * words, caveat emptor.
+ *
+ * It is worth mentioning that one of the legitimate use cases for this
+ * wrapper is operating on opaque types with a known offset for the
+ * `struct list_node' member (preferably 0), because it allows you to avoid
+ * disclosing the type of @i.
+ *
+ * Example:
+ * list_for_each_off(&parent->children, child,
+ * offsetof(struct child, list))
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_off(h, i, off) \
+ for (i = list_node_to_off_(list_debug(h)->n.next, (off)); \
+ list_node_from_off_((void *)i, (off)) != &(h)->n; \
+ i = list_node_to_off_(list_node_from_off_((void *)i, (off))->next, \
+ (off)))
+
+/**
+ * list_for_each_safe_off - iterate through a list of memory regions, maybe
+ * during deletion
+ * @h: the list_head
+ * @i: the pointer to a memory region which contains the list node data.
+ * @nxt: the structure containing the list_node
+ * @off: offset (relative to @i) at which the list node data resides.
+ *
+ * For details see `list_for_each_off' and `list_for_each_safe'
+ * descriptions.
+ *
+ * Example:
+ * list_for_each_safe_off(&parent->children, child,
+ * next, offsetof(struct child, list))
+ * printf("Name: %s\n", child->name);
+ */
+#define list_for_each_safe_off(h, i, nxt, off) \
+ for (i = list_node_to_off_(list_debug(h)->n.next, (off)), \
+ nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, \
+ (off)); \
+ list_node_from_off_(i, (off)) != &(h)->n; \
+ i = nxt, \
+ nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, \
+ (off)))
+
+
+/* Other -off variants. */
+#define list_entry_off(n, type, off) \
+ ((type *)list_node_from_off_((n), (off)))
+
+#define list_head_off(h, type, off) \
+ ((type *)list_top_((h), (off)))
+
+#define list_tail_off(h, type, off) \
+ ((type *)list_tail_((h), (off)))
+
+#define list_add_off(h, n, off) \
+ list_add((h), list_node_from_off_((n), (off)))
+
+#define list_del_off(n, off) \
+ list_del(list_node_from_off_((n), (off)))
+
+#define list_del_from_off(h, n, off) \
+ list_del_from(h, list_node_from_off_((n), (off)))
+
+/* Offset helper functions so we only single-evaluate. */
+static inline void *list_node_to_off_(struct list_node *node, size_t off)
+{
+ return (void *)((char *)node - off);
+}
+static inline struct list_node *list_node_from_off_(void *ptr, size_t off)
+{
+ return (struct list_node *)((char *)ptr + off);
+}
+
+/* Get the offset of the member, but make sure it's a list_node. */
+#define list_off_(type, member) \
+ (container_off(type, member) + \
+ check_type(((type *)0)->member, struct list_node))
+
+#define list_off_var_(var, member) \
+ (container_off_var(var, member) + \
+ check_type(var->member, struct list_node))
+
+#endif /* CCAN_LIST_H */
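
A short sketch of the API in combination (the struct and names below are
illustrative assumptions, in the spirit of the header's own examples):

    #include <stdio.h>
    #include <ccan/list/list.h>

    struct job {
            const char *name;
            struct list_node list;
    };

    int main(void)
    {
            LIST_HEAD(queue);
            struct job a = { "first" }, b = { "second" };
            struct job *j;

            /* FIFO discipline: append at the tail, pop from the head. */
            list_add_tail(&queue, &a.list);
            list_add_tail(&queue, &b.list);

            while ((j = list_pop(&queue, struct job, list)) != NULL)
                    printf("running %s\n", j->name);

            return 0;
    }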
diff --git a/ccan/list/test/compile_ok-constant.c b/ccan/list/test/compile_ok-constant.c
new file mode 100644
index 0000000..c57cdad
--- /dev/null
+++ b/ccan/list/test/compile_ok-constant.c
@@ -0,0 +1,49 @@
+#include <ccan/list/list.h>
+#include <ccan/tap/tap.h>
+#include <ccan/list/list.c>
+#include <stdbool.h>
+#include <stdio.h>
+
+struct child {
+ const char *name;
+ struct list_node list;
+};
+
+static bool children(const struct list_head *list)
+{
+ return !list_empty(list);
+}
+
+static const struct child *first_child(const struct list_head *list)
+{
+ return list_top(list, struct child, list);
+}
+
+static const struct child *last_child(const struct list_head *list)
+{
+ return list_tail(list, struct child, list);
+}
+
+static void check_children(const struct list_head *list)
+{
+ list_check(list, "bad child list");
+}
+
+static void print_children(const struct list_head *list)
+{
+ const struct child *c;
+ list_for_each(list, c, list)
+ printf("%s\n", c->name);
+}
+
+int main(void)
+{
+ LIST_HEAD(h);
+
+ children(&h);
+ first_child(&h);
+ last_child(&h);
+ check_children(&h);
+ print_children(&h);
+ return 0;
+}
diff --git a/ccan/list/test/helper.c b/ccan/list/test/helper.c
new file mode 100644
index 0000000..ca4700b
--- /dev/null
+++ b/ccan/list/test/helper.c
@@ -0,0 +1,56 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <timebase.h>
+
+#include <ccan/list/list.h>
+#include "helper.h"
+
+#define ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING \
+ (42)
+
+struct opaque {
+ struct list_node list;
+ size_t secret_offset;
+ char secret_drawer[42];
+};
+
+static bool not_randomized = true;
+
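+/* Allocate a blob and hide the answer at a random offset inside it. */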
+struct opaque *create_opaque_blob(void)
+{
+ struct opaque *blob = calloc(1, sizeof(struct opaque));
+
+ if (not_randomized) {
+ srandom((int)time(NULL));
+ not_randomized = false;
+ }
+
+ blob->secret_offset = random() % (sizeof(blob->secret_drawer));
+ blob->secret_drawer[blob->secret_offset] =
+ ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING;
+
+ return blob;
+}
+
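+/* Check the answer sits at the recorded offset and the rest is zeroed. */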
+bool if_blobs_know_the_secret(struct opaque *blob)
+{
+ bool answer = true;
+ size_t i;
+ for (i = 0; i < sizeof(blob->secret_drawer) /
+ sizeof(blob->secret_drawer[0]); i++)
+ if (i != blob->secret_offset)
+ answer = answer && (blob->secret_drawer[i] == 0);
+ else
+ answer = answer &&
+ (blob->secret_drawer[blob->secret_offset] ==
+ ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING);
+
+ return answer;
+}
+
+void destroy_opaque_blob(struct opaque *blob)
+{
+ free(blob);
+}
+
+
diff --git a/ccan/list/test/helper.h b/ccan/list/test/helper.h
new file mode 100644
index 0000000..4b64a7d
--- /dev/null
+++ b/ccan/list/test/helper.h
@@ -0,0 +1,9 @@
+/* These are in a separate C file so we can test undefined structures. */
+struct opaque;
+typedef struct opaque opaque_t;
+
+opaque_t *create_opaque_blob(void);
+bool if_blobs_know_the_secret(opaque_t *blob);
+void destroy_opaque_blob(opaque_t *blob);
+
+
diff --git a/ccan/list/test/run-check-corrupt.c b/ccan/list/test/run-check-corrupt.c
new file mode 100644
index 0000000..5dd9f9c
--- /dev/null
+++ b/ccan/list/test/run-check-corrupt.c
@@ -0,0 +1,89 @@
+#include <setjmp.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <err.h>
+
+/* We don't actually want it to exit... */
+static jmp_buf aborted;
+#define abort() longjmp(aborted, 1)
+
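+/* Divert list.c's diagnostics into a buffer the tests can compare against. */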
+#define fprintf my_fprintf
+static char printf_buffer[1000];
+
+static int my_fprintf(FILE *stream, const char *format, ...)
+{
+ va_list ap;
+ int ret;
+ va_start(ap, format);
+ ret = vsnprintf(printf_buffer, sizeof(printf_buffer), format, ap);
+ va_end(ap);
+ return ret;
+}
+
+#include <ccan/list/list.h>
+#include <ccan/tap/tap.h>
+#include <ccan/list/list.c>
+
+int main(int argc, char *argv[])
+{
+ struct list_head list;
+ struct list_node n1;
+ char expect[100];
+
+ plan_tests(9);
+ /* Empty list. */
+ list.n.next = &list.n;
+ list.n.prev = &list.n;
+ ok1(list_check(&list, NULL) == &list);
+
+ /* Bad back ptr */
+ list.n.prev = &n1;
+ /* Non-aborting version. */
+ ok1(list_check(&list, NULL) == NULL);
+
+ /* Aborting version. */
+ sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n",
+ &list, &list);
+ if (setjmp(aborted) == 0) {
+ list_check(&list, "test message");
+ fail("list_check on empty with bad back ptr didn't fail!");
+ } else {
+ ok1(strcmp(printf_buffer, expect) == 0);
+ }
+
+ /* n1 in list. */
+ list.n.next = &n1;
+ list.n.prev = &n1;
+ n1.prev = &list.n;
+ n1.next = &list.n;
+ ok1(list_check(&list, NULL) == &list);
+ ok1(list_check_node(&n1, NULL) == &n1);
+
+ /* Bad back ptr */
+ n1.prev = &n1;
+ ok1(list_check(&list, NULL) == NULL);
+ ok1(list_check_node(&n1, NULL) == NULL);
+
+ /* Aborting version. */
+ sprintf(expect, "test message: prev corrupt in node %p (1) of %p\n",
+ &n1, &list);
+ if (setjmp(aborted) == 0) {
+ list_check(&list, "test message");
+ fail("list_check on n1 bad back ptr didn't fail!");
+ } else {
+ ok1(strcmp(printf_buffer, expect) == 0);
+ }
+
+ sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n",
+ &n1, &n1);
+ if (setjmp(aborted) == 0) {
+ list_check_node(&n1, "test message");
+ fail("list_check_node on n1 bad back ptr didn't fail!");
+ } else {
+ ok1(strcmp(printf_buffer, expect) == 0);
+ }
+
+ return exit_status();
+}
diff --git a/ccan/list/test/run-list_del_from-assert.c b/ccan/list/test/run-list_del_from-assert.c
new file mode 100644
index 0000000..05d6cad
--- /dev/null
+++ b/ccan/list/test/run-list_del_from-assert.c
@@ -0,0 +1,36 @@
+#define CCAN_LIST_DEBUG 1
+#include <ccan/list/list.h>
+#include <ccan/tap/tap.h>
+#include <ccan/list/list.c>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <signal.h>
+
+int main(int argc, char *argv[])
+{
+ struct list_head list1, list2;
+ struct list_node n1, n2, n3;
+ pid_t child;
+ int status;
+
+ plan_tests(1);
+ list_head_init(&list1);
+ list_head_init(&list2);
+ list_add(&list1, &n1);
+ list_add(&list2, &n2);
+ list_add_tail(&list2, &n3);
+
+ child = fork();
+ if (child) {
+ wait(&status);
+ } else {
+ /* This should abort. */
+ list_del_from(&list1, &n3);
+ exit(0);
+ }
+
+ ok1(WIFSIGNALED(status) && WTERMSIG(status) == SIGABRT);
+ list_del_from(&list2, &n3);
+ return exit_status();
+}
diff --git a/ccan/list/test/run-single-eval.c b/ccan/list/test/run-single-eval.c
new file mode 100644
index 0000000..f90eed3
--- /dev/null
+++ b/ccan/list/test/run-single-eval.c
@@ -0,0 +1,168 @@
+/* Make sure macros only evaluate their args once. */
+#include <ccan/list/list.h>
+#include <ccan/tap/tap.h>
+#include <ccan/list/list.c>
+
+struct parent {
+ const char *name;
+ struct list_head children;
+ unsigned int num_children;
+ int eval_count;
+};
+
+struct child {
+ const char *name;
+ struct list_node list;
+};
+
+static LIST_HEAD(static_list);
+
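+/* ref() yields @obj and bumps @counter, letting the tests below verify
+ * that each list macro evaluates its argument exactly once. */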
+#define ref(obj, counter) ((counter)++, (obj))
+
+int main(int argc, char *argv[])
+{
+ struct parent parent;
+ struct child c1, c2, c3, *c, *n;
+ unsigned int i;
+ unsigned int static_count = 0, parent_count = 0, list_count = 0,
+ node_count = 0;
+ struct list_head list = LIST_HEAD_INIT(list);
+
+ plan_tests(74);
+ /* Test LIST_HEAD, LIST_HEAD_INIT, list_empty and check_list */
+ ok1(list_empty(ref(&static_list, static_count)));
+ ok1(static_count == 1);
+ ok1(list_check(ref(&static_list, static_count), NULL));
+ ok1(static_count == 2);
+ ok1(list_empty(ref(&list, list_count)));
+ ok1(list_count == 1);
+ ok1(list_check(ref(&list, list_count), NULL));
+ ok1(list_count == 2);
+
+ parent.num_children = 0;
+ list_head_init(ref(&parent.children, parent_count));
+ ok1(parent_count == 1);
+ /* Test list_head_init */
+ ok1(list_empty(ref(&parent.children, parent_count)));
+ ok1(parent_count == 2);
+ ok1(list_check(ref(&parent.children, parent_count), NULL));
+ ok1(parent_count == 3);
+
+ c2.name = "c2";
+ list_add(ref(&parent.children, parent_count), &c2.list);
+ ok1(parent_count == 4);
+ /* Test list_add and !list_empty. */
+ ok1(!list_empty(ref(&parent.children, parent_count)));
+ ok1(parent_count == 5);
+ ok1(c2.list.next == &parent.children.n);
+ ok1(c2.list.prev == &parent.children.n);
+ ok1(parent.children.n.next == &c2.list);
+ ok1(parent.children.n.prev == &c2.list);
+ /* Test list_check */
+ ok1(list_check(ref(&parent.children, parent_count), NULL));
+ ok1(parent_count == 6);
+
+ c1.name = "c1";
+ list_add(ref(&parent.children, parent_count), &c1.list);
+ ok1(parent_count == 7);
+ /* Test list_add and !list_empty. */
+ ok1(!list_empty(ref(&parent.children, parent_count)));
+ ok1(parent_count == 8);
+ ok1(c2.list.next == &parent.children.n);
+ ok1(c2.list.prev == &c1.list);
+ ok1(parent.children.n.next == &c1.list);
+ ok1(parent.children.n.prev == &c2.list);
+ ok1(c1.list.next == &c2.list);
+ ok1(c1.list.prev == &parent.children.n);
+ /* Test list_check */
+ ok1(list_check(ref(&parent.children, parent_count), NULL));
+ ok1(parent_count == 9);
+
+ c3.name = "c3";
+ list_add_tail(ref(&parent.children, parent_count), &c3.list);
+ ok1(parent_count == 10);
+ /* Test list_add_tail and !list_empty. */
+ ok1(!list_empty(ref(&parent.children, parent_count)));
+ ok1(parent_count == 11);
+ ok1(parent.children.n.next == &c1.list);
+ ok1(parent.children.n.prev == &c3.list);
+ ok1(c1.list.next == &c2.list);
+ ok1(c1.list.prev == &parent.children.n);
+ ok1(c2.list.next == &c3.list);
+ ok1(c2.list.prev == &c1.list);
+ ok1(c3.list.next == &parent.children.n);
+ ok1(c3.list.prev == &c2.list);
+ /* Test list_check */
+ ok1(list_check(ref(&parent.children, parent_count), NULL));
+ ok1(parent_count == 12);
+
+ /* Test list_check_node */
+ ok1(list_check_node(&c1.list, NULL));
+ ok1(list_check_node(&c2.list, NULL));
+ ok1(list_check_node(&c3.list, NULL));
+
+ /* Test list_top */
+ ok1(list_top(ref(&parent.children, parent_count), struct child, list) == &c1);
+ ok1(parent_count == 13);
+
+ /* Test list_tail */
+ ok1(list_tail(ref(&parent.children, parent_count), struct child, list) == &c3);
+ ok1(parent_count == 14);
+
+ /* Test list_for_each. */
+ i = 0;
+ list_for_each(&parent.children, c, list) {
+ switch (i++) {
+ case 0:
+ ok1(c == &c1);
+ break;
+ case 1:
+ ok1(c == &c2);
+ break;
+ case 2:
+ ok1(c == &c3);
+ break;
+ }
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+
+ /* Test list_for_each_safe, list_del and list_del_from. */
+ i = 0;
+ list_for_each_safe(&parent.children, c, n, list) {
+ switch (i++) {
+ case 0:
+ ok1(c == &c1);
+ list_del(ref(&c->list, node_count));
+ ok1(node_count == 1);
+ break;
+ case 1:
+ ok1(c == &c2);
+ list_del_from(ref(&parent.children, parent_count),
+ ref(&c->list, node_count));
+ ok1(node_count == 2);
+ break;
+ case 2:
+ ok1(c == &c3);
+ list_del_from(ref(&parent.children, parent_count),
+ ref(&c->list, node_count));
+ ok1(node_count == 3);
+ break;
+ }
+ ok1(list_check(ref(&parent.children, parent_count), NULL));
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+ ok1(parent_count == 19);
+ ok1(list_empty(ref(&parent.children, parent_count)));
+ ok1(parent_count == 20);
+
+ /* Test list_top/list_tail on empty list. */
+ ok1(list_top(ref(&parent.children, parent_count), struct child, list) == NULL);
+ ok1(parent_count == 21);
+ ok1(list_tail(ref(&parent.children, parent_count), struct child, list) == NULL);
+ ok1(parent_count == 22);
+ return exit_status();
+}
diff --git a/ccan/list/test/run-with-debug.c b/ccan/list/test/run-with-debug.c
new file mode 100644
index 0000000..d090242
--- /dev/null
+++ b/ccan/list/test/run-with-debug.c
@@ -0,0 +1,3 @@
+/* Just like run.c, but with all debug checks enabled. */
+#define CCAN_LIST_DEBUG 1
+#include <ccan/list/test/run.c>
diff --git a/ccan/list/test/run.c b/ccan/list/test/run.c
new file mode 100644
index 0000000..1d02acd
--- /dev/null
+++ b/ccan/list/test/run.c
@@ -0,0 +1,200 @@
+#include <ccan/list/list.h>
+#include <ccan/tap/tap.h>
+#include <ccan/list/list.c>
+#include "helper.h"
+
+struct parent {
+ const char *name;
+ struct list_head children;
+ unsigned int num_children;
+};
+
+struct child {
+ const char *name;
+ struct list_node list;
+};
+
+static LIST_HEAD(static_list);
+
+int main(int argc, char *argv[])
+{
+ struct parent parent;
+ struct child c1, c2, c3, *c, *n;
+ unsigned int i;
+ struct list_head list = LIST_HEAD_INIT(list);
+ opaque_t *q, *nq;
+ struct list_head opaque_list = LIST_HEAD_INIT(opaque_list);
+
+ plan_tests(65);
+ /* Test LIST_HEAD, LIST_HEAD_INIT, list_empty and check_list */
+ ok1(list_empty(&static_list));
+ ok1(list_check(&static_list, NULL));
+ ok1(list_empty(&list));
+ ok1(list_check(&list, NULL));
+
+ parent.num_children = 0;
+ list_head_init(&parent.children);
+ /* Test list_head_init */
+ ok1(list_empty(&parent.children));
+ ok1(list_check(&parent.children, NULL));
+
+ c2.name = "c2";
+ list_add(&parent.children, &c2.list);
+ /* Test list_add and !list_empty. */
+ ok1(!list_empty(&parent.children));
+ ok1(c2.list.next == &parent.children.n);
+ ok1(c2.list.prev == &parent.children.n);
+ ok1(parent.children.n.next == &c2.list);
+ ok1(parent.children.n.prev == &c2.list);
+ /* Test list_check */
+ ok1(list_check(&parent.children, NULL));
+
+ c1.name = "c1";
+ list_add(&parent.children, &c1.list);
+ /* Test list_add and !list_empty. */
+ ok1(!list_empty(&parent.children));
+ ok1(c2.list.next == &parent.children.n);
+ ok1(c2.list.prev == &c1.list);
+ ok1(parent.children.n.next == &c1.list);
+ ok1(parent.children.n.prev == &c2.list);
+ ok1(c1.list.next == &c2.list);
+ ok1(c1.list.prev == &parent.children.n);
+ /* Test list_check */
+ ok1(list_check(&parent.children, NULL));
+
+ c3.name = "c3";
+ list_add_tail(&parent.children, &c3.list);
+ /* Test list_add_tail and !list_empty. */
+ ok1(!list_empty(&parent.children));
+ ok1(parent.children.n.next == &c1.list);
+ ok1(parent.children.n.prev == &c3.list);
+ ok1(c1.list.next == &c2.list);
+ ok1(c1.list.prev == &parent.children.n);
+ ok1(c2.list.next == &c3.list);
+ ok1(c2.list.prev == &c1.list);
+ ok1(c3.list.next == &parent.children.n);
+ ok1(c3.list.prev == &c2.list);
+ /* Test list_check */
+ ok1(list_check(&parent.children, NULL));
+
+ /* Test list_check_node */
+ ok1(list_check_node(&c1.list, NULL));
+ ok1(list_check_node(&c2.list, NULL));
+ ok1(list_check_node(&c3.list, NULL));
+
+ /* Test list_top */
+ ok1(list_top(&parent.children, struct child, list) == &c1);
+
+ /* Test list_tail */
+ ok1(list_tail(&parent.children, struct child, list) == &c3);
+
+ /* Test list_for_each. */
+ i = 0;
+ list_for_each(&parent.children, c, list) {
+ switch (i++) {
+ case 0:
+ ok1(c == &c1);
+ break;
+ case 1:
+ ok1(c == &c2);
+ break;
+ case 2:
+ ok1(c == &c3);
+ break;
+ }
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+
+ /* Test list_for_each_rev. */
+ i = 0;
+ list_for_each_rev(&parent.children, c, list) {
+ switch (i++) {
+ case 0:
+ ok1(c == &c3);
+ break;
+ case 1:
+ ok1(c == &c2);
+ break;
+ case 2:
+ ok1(c == &c1);
+ break;
+ }
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+
+ /* Test list_for_each_safe, list_del and list_del_from. */
+ i = 0;
+ list_for_each_safe(&parent.children, c, n, list) {
+ switch (i++) {
+ case 0:
+ ok1(c == &c1);
+ list_del(&c->list);
+ break;
+ case 1:
+ ok1(c == &c2);
+ list_del_from(&parent.children, &c->list);
+ break;
+ case 2:
+ ok1(c == &c3);
+ list_del_from(&parent.children, &c->list);
+ break;
+ }
+ ok1(list_check(&parent.children, NULL));
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+ ok1(list_empty(&parent.children));
+
+ /* Test list_for_each_off. */
+ list_add_tail(&opaque_list,
+ (struct list_node *)create_opaque_blob());
+ list_add_tail(&opaque_list,
+ (struct list_node *)create_opaque_blob());
+ list_add_tail(&opaque_list,
+ (struct list_node *)create_opaque_blob());
+
+ i = 0;
+
+ list_for_each_off(&opaque_list, q, 0) {
+ i++;
+ ok1(if_blobs_know_the_secret(q));
+ }
+ ok1(i == 3);
+
+ /* Test list_for_each_safe_off, list_del_off and list_del_from_off. */
+ i = 0;
+ list_for_each_safe_off(&opaque_list, q, nq, 0) {
+ switch (i++) {
+ case 0:
+ ok1(if_blobs_know_the_secret(q));
+ list_del_off(q, 0);
+ destroy_opaque_blob(q);
+ break;
+ case 1:
+ ok1(if_blobs_know_the_secret(q));
+ list_del_from_off(&opaque_list, q, 0);
+ destroy_opaque_blob(q);
+ break;
+ case 2:
+ ok1(if_blobs_know_the_secret(q));
+ list_del_from_off(&opaque_list, q, 0);
+ destroy_opaque_blob(q);
+ break;
+ }
+ ok1(list_check(&opaque_list, NULL));
+ if (i > 2)
+ break;
+ }
+ ok1(i == 3);
+ ok1(list_empty(&opaque_list));
+
+ /* Test list_top/list_tail on empty list. */
+ ok1(list_top(&parent.children, struct child, list) == NULL);
+ ok1(list_tail(&parent.children, struct child, list) == NULL);
+ return exit_status();
+}
diff --git a/ccan/short_types/LICENSE b/ccan/short_types/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/short_types/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+ the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+ moral rights retained by the original author(s) and/or performer(s);
+ publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+ rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+ rights protecting the extraction, dissemination, use and reuse of data in a Work;
+ database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+ other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+ Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+ Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+ Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/short_types/_info b/ccan/short_types/_info
new file mode 100644
index 0000000..cfd439e
--- /dev/null
+++ b/ccan/short_types/_info
@@ -0,0 +1,81 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * short_types - shorter names for standard integer types
+ *
+ * "C is a Spartan language, and so should your naming be."
+ * -- Linus Torvalds
+ *
+ * The short_types header provides for convenient abbreviations for the
+ * posixly-damned uint32_t types. It also provides be32/le32 for explicitly
+ * annotating types of specific endian.
+ *
+ * Include this header, if only to stop people using these identifiers
+ * for other things!
+ *
+ * Example:
+ * #include <stdint.h>
+ * #include <string.h>
+ * #include <stdio.h>
+ * #include <ccan/short_types/short_types.h>
+ *
+ * // Print nonsensical numerical comparison of POSIX vs. short_types.
+ * #define stringify_1(x) #x
+ * #define stringify(x) stringify_1(x)
+ *
+ * static void evaluate(size_t size, const char *posix, const char *sht,
+ * unsigned int *posix_total, unsigned int *sht_total,
+ * unsigned int *size_total)
+ * {
+ * printf("\t%ssigned %s: POSIX %zu%%, short %zu%%\n",
+ * sht[0] == 'u' ? "un" : "",
+ * sht+1,
+ * strlen(posix)*100 / size,
+ * strlen(sht)*100 / size);
+ * *posix_total += strlen(posix);
+ * *sht_total += strlen(sht);
+ * *size_total += size;
+ * }
+ *
+ * #define EVALUATE(psx, sht, pt, st, t) \
+ * evaluate(sizeof(psx), stringify(psx), stringify(sht), pt, st, t)
+ *
+ * int main(void)
+ * {
+ * unsigned int posix_total = 0, sht_total = 0, size_total = 0;
+ *
+ * printf("Comparing size of type vs size of name:\n");
+ *
+ * EVALUATE(uint8_t, u8, &posix_total, &sht_total, &size_total);
+ * EVALUATE(int8_t, s8, &posix_total, &sht_total, &size_total);
+ * EVALUATE(uint16_t, u16, &posix_total, &sht_total, &size_total);
+ * EVALUATE(int16_t, s16, &posix_total, &sht_total, &size_total);
+ * EVALUATE(uint32_t, u32, &posix_total, &sht_total, &size_total);
+ * EVALUATE(int32_t, s32, &posix_total, &sht_total, &size_total);
+ * EVALUATE(uint64_t, u64, &posix_total, &sht_total, &size_total);
+ * EVALUATE(int64_t, s64, &posix_total, &sht_total, &size_total);
+ *
+ * printf("Conclusion:\n"
+ * "\tPOSIX is %u%% LESS efficient than binary.\n"
+ * "\tshort_types.h is %u%% MORE efficient than binary.\n",
+ * (posix_total - size_total) * 100 / size_total,
+ * (size_total - sht_total) * 100 / size_total);
+ * return 0;
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/short_types/short_types.h b/ccan/short_types/short_types.h
new file mode 100644
index 0000000..f94ec09
--- /dev/null
+++ b/ccan/short_types/short_types.h
@@ -0,0 +1,32 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_SHORT_TYPES_H
+#define CCAN_SHORT_TYPES_H
+#include <stdint.h>
+
+/**
+ * u64/s64/u32/s32/u16/s16/u8/s8 - short names for explicitly-sized types.
+ */
+typedef uint64_t u64;
+typedef int64_t s64;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint8_t u8;
+typedef int8_t s8;
+
+/**
+ * be64/be32/be16 - 64/32/16 bit big-endian representation.
+ */
+typedef uint64_t be64;
+typedef uint32_t be32;
+typedef uint16_t be16;
+
+/**
+ * le64/le32/le16 - 64/32/16 bit little-endian representation.
+ */
+typedef uint64_t le64;
+typedef uint32_t le32;
+typedef uint16_t le16;
+
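+/*
+ * Illustrative sketch of typical usage (assumed, with the conversion
+ * helpers coming from ccan/endian): the be/le typedefs are annotations
+ * only, the compiler does not enforce them, so byte-swapping is still
+ * explicit:
+ *
+ *	#include <ccan/endian/endian.h>
+ *
+ *	struct wire_hdr {
+ *		be32 magic;	// big-endian on the wire
+ *		le16 flags;	// little-endian on the wire
+ *	};
+ *
+ *	u32 host_magic = be32_to_cpu(hdr->magic);
+ */
+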
+#endif /* CCAN_SHORT_TYPES_H */
diff --git a/ccan/short_types/test/run.c b/ccan/short_types/test/run.c
new file mode 100644
index 0000000..5aa80d7
--- /dev/null
+++ b/ccan/short_types/test/run.c
@@ -0,0 +1,38 @@
+#include <ccan/short_types/short_types.h>
+#include <ccan/tap/tap.h>
+#include <stdlib.h>
+#include <err.h>
+
+int main(int argc, char *argv[])
+{
+ plan_tests(22);
+
+ ok1(sizeof(u64) == 8);
+ ok1(sizeof(s64) == 8);
+ ok1(sizeof(u32) == 4);
+ ok1(sizeof(s32) == 4);
+ ok1(sizeof(u16) == 2);
+ ok1(sizeof(s16) == 2);
+ ok1(sizeof(u8) == 1);
+ ok1(sizeof(s8) == 1);
+
+ ok1(sizeof(be64) == 8);
+ ok1(sizeof(be32) == 4);
+ ok1(sizeof(be16) == 2);
+
+ ok1(sizeof(le64) == 8);
+ ok1(sizeof(le32) == 4);
+ ok1(sizeof(le16) == 2);
+
+ /* Signedness tests. */
+ ok1((u64)-1 > 0);
+ ok1((u32)-1 > 0);
+ ok1((u16)-1 > 0);
+ ok1((u8)-1 > 0);
+ ok1((s64)-1 < 0);
+ ok1((s32)-1 < 0);
+ ok1((s16)-1 < 0);
+ ok1((s8)-1 < 0);
+
+ return exit_status();
+}
diff --git a/ccan/str/LICENSE b/ccan/str/LICENSE
new file mode 100644
index 0000000..feb9b11
--- /dev/null
+++ b/ccan/str/LICENSE
@@ -0,0 +1,28 @@
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
+
+ the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
+ moral rights retained by the original author(s) and/or performer(s);
+ publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
+ rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
+ rights protecting the extraction, dissemination, use and reuse of data in a Work;
+ database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
+ other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
+ Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
+ Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
+ Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work.
diff --git a/ccan/str/_info b/ccan/str/_info
new file mode 100644
index 0000000..548f059
--- /dev/null
+++ b/ccan/str/_info
@@ -0,0 +1,52 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * str - string helper routines
+ *
+ * This is a grab bag of functions for string operations, designed to enhance
+ * the standard string.h.
+ *
+ * Note that if you define CCAN_STR_DEBUG, you will get extra compile
+ * checks on common misuses of the following functions (they will now
+ * be out-of-line, so there is a runtime penalty!).
+ *
+ * strstr, strchr, strrchr:
+ * Return const char * if first argument is const (gcc only).
+ *
+ * isalnum, isalpha, isascii, isblank, iscntrl, isdigit, isgraph,
+ * islower, isprint, ispunct, isspace, isupper, isxdigit:
+ * Static and runtime check that input is EOF or an *unsigned*
+ * char, as per C standard (really!).
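+ *
+ * For instance (an illustrative misuse, assuming a target where plain
+ * char is signed), CCAN_STR_DEBUG turns the following into a
+ * compile-time failure:
+ *
+ *	char *p = get_line();	// get_line() is hypothetical
+ *	while (isspace(*p))	// should be isspace((unsigned char)*p)
+ *		p++;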
+ *
+ * Example:
+ * #include <stdio.h>
+ * #include <ccan/str/str.h>
+ *
+ * int main(int argc, char *argv[])
+ * {
+ * if (argv[1] && streq(argv[1], "--verbose"))
+ * printf("verbose set\n");
+ * if (argv[1] && strstarts(argv[1], "--"))
+ * printf("Some option set\n");
+ * if (argv[1] && strends(argv[1], "cow-powers"))
+ * printf("Magic option set\n");
+ * return 0;
+ * }
+ *
+ * License: CC0 (Public domain)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ printf("ccan/build_assert\n");
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/ccan/str/str.c b/ccan/str/str.c
new file mode 100644
index 0000000..a9245c1
--- /dev/null
+++ b/ccan/str/str.c
@@ -0,0 +1,13 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#include <ccan/str/str.h>
+
+size_t strcount(const char *haystack, const char *needle)
+{
+ size_t i = 0, nlen = strlen(needle);
+
+ while ((haystack = strstr(haystack, needle)) != NULL) {
+ i++;
+ haystack += nlen;
+ }
+ return i;
+}
diff --git a/ccan/str/str.h b/ccan/str/str.h
new file mode 100644
index 0000000..89668c6
--- /dev/null
+++ b/ccan/str/str.h
@@ -0,0 +1,120 @@
+/* CC0 (Public domain) - see LICENSE file for details */
+#ifndef CCAN_STR_H
+#define CCAN_STR_H
+#include "config.h"
+#include <string.h>
+#include <stdbool.h>
+#include <limits.h>
+#include <ctype.h>
+
+/**
+ * streq - Are two strings equal?
+ * @a: first string
+ * @b: second string
+ *
+ * This macro is arguably more readable than "!strcmp(a, b)".
+ *
+ * Example:
+ * if (streq(somestring, ""))
+ * printf("String is empty!\n");
+ */
+#define streq(a,b) (strcmp((a),(b)) == 0)
+
+/**
+ * strstarts - Does this string start with this prefix?
+ * @str: string to test
+ * @prefix: prefix to look for at start of str
+ *
+ * Example:
+ * if (strstarts(somestring, "foo"))
+ * printf("String %s begins with 'foo'!\n", somestring);
+ */
+#define strstarts(str,prefix) (strncmp((str),(prefix),strlen(prefix)) == 0)
+
+/**
+ * strends - Does this string end with this postfix?
+ * @str: string to test
+ * @postfix: postfix to look for at end of str
+ *
+ * Example:
+ * if (strends(somestring, "foo"))
+ * printf("String %s ends with 'foo'!\n", somestring);
+ */
+static inline bool strends(const char *str, const char *postfix)
+{
+ if (strlen(str) < strlen(postfix))
+ return false;
+
+ return streq(str + strlen(str) - strlen(postfix), postfix);
+}
+
+/**
+ * stringify - Turn expression into a string literal
+ * @expr: any C expression
+ *
+ * Example:
+ * #define PRINT_COND_IF_FALSE(cond) \
+ * ((cond) || printf("%s is false!", stringify(cond)))
+ */
+#define stringify(expr) stringify_1(expr)
+/* Double-indirection required to stringify expansions */
+#define stringify_1(expr) #expr
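+
+/*
+ * Illustrative note on the indirection: the extra level makes macro
+ * arguments expand first, so on line 42 stringify(__LINE__) yields "42",
+ * whereas a direct stringify_1(__LINE__) would yield "__LINE__".
+ */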
+
+/**
+ * strcount - Count number of (non-overlapping) occurrences of a substring.
+ * @haystack: a C string
+ * @needle: a substring
+ *
+ * Example:
+ * assert(strcount("aaa aaa", "a") == 6);
+ * assert(strcount("aaa aaa", "ab") == 0);
+ * assert(strcount("aaa aaa", "aa") == 2);
+ */
+size_t strcount(const char *haystack, const char *needle);
+
+/**
+ * STR_MAX_CHARS - Maximum possible size of numeric string for this type.
+ * @type_or_expr: a pointer or integer type or expression.
+ *
+ * This provides enough space for a nul-terminated string which represents the
+ * largest possible value for the type or expression.
+ *
+ * Note: The implementation adds extra space so hex or negative
+ * values will fit (e.g. sprintf(..., "%p")).
+ *
+ * Example:
+ * char str[STR_MAX_CHARS(int)];
+ *
+ * sprintf(str, "%i", 7);
+ */
+#define STR_MAX_CHARS(type_or_expr) \
+ ((sizeof(type_or_expr) * CHAR_BIT + 8) / 9 * 3 + 2 \
+ + STR_MAX_CHARS_TCHECK_(type_or_expr))
+
+#if HAVE_TYPEOF
+/* Only a simple type can have 0 assigned, so test that. */
+#define STR_MAX_CHARS_TCHECK_(type_or_expr) \
+ ({ typeof(type_or_expr) x = 0; (void)x; 0; })
+#else
+#define STR_MAX_CHARS_TCHECK_(type_or_expr) 0
+#endif
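+
+/*
+ * Worked example (illustrative): for a 4-byte int the formula gives
+ * (32 + 8) / 9 * 3 + 2 = 14 characters, comfortably above the 12 needed
+ * for "-2147483648" plus its NUL terminator.
+ */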
+
+/* These checks force things out of line, hence they are under DEBUG. */
+#ifdef CCAN_STR_DEBUG
+#if HAVE_TYPEOF
+/* With GNU magic, we can make const-respecting standard string functions. */
+#undef strstr
+#undef strchr
+#undef strrchr
+
+/* + 0 is needed to decay array into pointer. */
+#define strstr(haystack, needle) \
+ ((typeof((haystack) + 0))str_strstr((haystack), (needle)))
+#define strchr(haystack, c) \
+ ((typeof((haystack) + 0))str_strchr((haystack), (c)))
+#define strrchr(haystack, c) \
+ ((typeof((haystack) + 0))str_strrchr((haystack), (c)))
+#endif
+#endif /* CCAN_STR_DEBUG */
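+
+/*
+ * Illustrative effect of the const-preserving overrides above: the
+ * result type now follows the argument, so this assignment provokes a
+ * discarded-qualifier diagnostic (an error with -Werror) instead of
+ * silently dropping const:
+ *
+ *	const char *cs = "hello";
+ *	char *s = strstr(cs, "ell");	// warns: const char * vs char *
+ */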
+
+#endif /* CCAN_STR_H */
diff --git a/ccan/str/test/compile_fail-STR_MAX_CHARS.c b/ccan/str/test/compile_fail-STR_MAX_CHARS.c
new file mode 100644
index 0000000..74448c1
--- /dev/null
+++ b/ccan/str/test/compile_fail-STR_MAX_CHARS.c
@@ -0,0 +1,23 @@
+#include <ccan/str/str.h>
+
+struct s {
+ int val;
+};
+
+int main(int argc, char *argv[])
+{
+ struct s
+#ifdef FAIL
+#if !HAVE_TYPEOF
+ #error We need typeof to check STR_MAX_CHARS.
+#endif
+#else
+ /* A pointer is OK. */
+ *
+#endif
+ val;
+ char str[STR_MAX_CHARS(val)];
+
+ str[0] = '\0';
+ return str[0] ? 0 : 1;
+}
diff --git a/ccan/str/test/compile_fail-isalnum.c b/ccan/str/test/compile_fail-isalnum.c
new file mode 100644
index 0000000..930deff
--- /dev/null
+++ b/ccan/str/test/compile_fail-isalnum.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isalnum.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isalnum(c);
+}
diff --git a/ccan/str/test/compile_fail-isalpha.c b/ccan/str/test/compile_fail-isalpha.c
new file mode 100644
index 0000000..2005109
--- /dev/null
+++ b/ccan/str/test/compile_fail-isalpha.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isalpha.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isalpha(c);
+}
diff --git a/ccan/str/test/compile_fail-isascii.c b/ccan/str/test/compile_fail-isascii.c
new file mode 100644
index 0000000..ee55e49
--- /dev/null
+++ b/ccan/str/test/compile_fail-isascii.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isascii.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isascii(c);
+}
diff --git a/ccan/str/test/compile_fail-isblank.c b/ccan/str/test/compile_fail-isblank.c
new file mode 100644
index 0000000..f4cb961
--- /dev/null
+++ b/ccan/str/test/compile_fail-isblank.c
@@ -0,0 +1,26 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF || !HAVE_ISBLANK
+#error We need typeof to check isblank.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+#if HAVE_ISBLANK
+ return isblank(c);
+#else
+ return c;
+#endif
+}
diff --git a/ccan/str/test/compile_fail-iscntrl.c b/ccan/str/test/compile_fail-iscntrl.c
new file mode 100644
index 0000000..bc74146
--- /dev/null
+++ b/ccan/str/test/compile_fail-iscntrl.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check iscntrl.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return iscntrl(c);
+}
diff --git a/ccan/str/test/compile_fail-isdigit.c b/ccan/str/test/compile_fail-isdigit.c
new file mode 100644
index 0000000..71d1c71
--- /dev/null
+++ b/ccan/str/test/compile_fail-isdigit.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isdigit.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isdigit(c);
+}
diff --git a/ccan/str/test/compile_fail-islower.c b/ccan/str/test/compile_fail-islower.c
new file mode 100644
index 0000000..ca3f990
--- /dev/null
+++ b/ccan/str/test/compile_fail-islower.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check islower.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return islower(c);
+}
diff --git a/ccan/str/test/compile_fail-isprint.c b/ccan/str/test/compile_fail-isprint.c
new file mode 100644
index 0000000..6432e41
--- /dev/null
+++ b/ccan/str/test/compile_fail-isprint.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isprint.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isprint(c);
+}
diff --git a/ccan/str/test/compile_fail-ispunct.c b/ccan/str/test/compile_fail-ispunct.c
new file mode 100644
index 0000000..5d941fc
--- /dev/null
+++ b/ccan/str/test/compile_fail-ispunct.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check ispunct.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return ispunct(c);
+}
diff --git a/ccan/str/test/compile_fail-isspace.c b/ccan/str/test/compile_fail-isspace.c
new file mode 100644
index 0000000..bfee1f8
--- /dev/null
+++ b/ccan/str/test/compile_fail-isspace.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isspace.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isspace(c);
+}
diff --git a/ccan/str/test/compile_fail-isupper.c b/ccan/str/test/compile_fail-isupper.c
new file mode 100644
index 0000000..4cf9fd3
--- /dev/null
+++ b/ccan/str/test/compile_fail-isupper.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isupper.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isupper(c);
+}
diff --git a/ccan/str/test/compile_fail-isxdigit.c b/ccan/str/test/compile_fail-isxdigit.c
new file mode 100644
index 0000000..65e6006
--- /dev/null
+++ b/ccan/str/test/compile_fail-isxdigit.c
@@ -0,0 +1,22 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF
+#error We need typeof to check isxdigit.
+#endif
+ char
+#else
+ unsigned char
+#endif
+ c = argv[0][0];
+
+#ifdef FAIL
+ /* Fake fail on unsigned char platforms. */
+ BUILD_ASSERT((char)255 < 0);
+#endif
+
+ return isxdigit(c);
+}
diff --git a/ccan/str/test/compile_fail-strchr.c b/ccan/str/test/compile_fail-strchr.c
new file mode 100644
index 0000000..74a7314
--- /dev/null
+++ b/ccan/str/test/compile_fail-strchr.c
@@ -0,0 +1,18 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_TYPEOF
+	#error We need typeof to check strchr.
+#endif
+#else
+ const
+#endif
+ char *ret;
+ const char *str = "hello";
+
+ ret = strchr(str, 'l');
+ return ret ? 0 : 1;
+}
diff --git a/ccan/str/test/compile_fail-strrchr.c b/ccan/str/test/compile_fail-strrchr.c
new file mode 100644
index 0000000..ba7d17e
--- /dev/null
+++ b/ccan/str/test/compile_fail-strrchr.c
@@ -0,0 +1,18 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_TYPEOF
+	#error We need typeof to check strrchr.
+#endif
+#else
+ const
+#endif
+ char *ret;
+ const char *str = "hello";
+
+ ret = strrchr(str, 'l');
+ return ret ? 0 : 1;
+}
diff --git a/ccan/str/test/compile_fail-strstr.c b/ccan/str/test/compile_fail-strstr.c
new file mode 100644
index 0000000..deefef6
--- /dev/null
+++ b/ccan/str/test/compile_fail-strstr.c
@@ -0,0 +1,18 @@
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/str.h>
+
+int main(int argc, char *argv[])
+{
+#ifdef FAIL
+#if !HAVE_TYPEOF
+ #error We need typeof to check strstr.
+#endif
+#else
+ const
+#endif
+ char *ret;
+ const char *str = "hello";
+
+ ret = strstr(str, "hell");
+ return ret ? 0 : 1;
+}
diff --git a/ccan/str/test/debug.c b/ccan/str/test/debug.c
new file mode 100644
index 0000000..4bd384f
--- /dev/null
+++ b/ccan/str/test/debug.c
@@ -0,0 +1,5 @@
+/* We can't use the normal "#include the .c file" trick, since the
+   included code would be contaminated by str.h's macro overrides. So
+   every test pulls it in via this wrapper instead. */
+#define CCAN_STR_DEBUG 1
+#include <ccan/str/debug.c>
diff --git a/ccan/str/test/run-STR_MAX_CHARS.c b/ccan/str/test/run-STR_MAX_CHARS.c
new file mode 100644
index 0000000..ae6969c
--- /dev/null
+++ b/ccan/str/test/run-STR_MAX_CHARS.c
@@ -0,0 +1,59 @@
+#include <ccan/str/str.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ccan/tap/tap.h>
+#include <stdint.h>
+
+int main(int argc, char *argv[])
+{
+ char str[1000];
+ struct {
+ uint8_t u1byte;
+ int8_t s1byte;
+ uint16_t u2byte;
+ int16_t s2byte;
+ uint32_t u4byte;
+ int32_t s4byte;
+ uint64_t u8byte;
+ int64_t s8byte;
+ void *ptr;
+ } types;
+
+ plan_tests(13);
+
+ memset(&types, 0xFF, sizeof(types));
+
+ /* Hex versions */
+ sprintf(str, "0x%llx", (unsigned long long)types.u1byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u1byte));
+ sprintf(str, "0x%llx", (unsigned long long)types.u2byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u2byte));
+ sprintf(str, "0x%llx", (unsigned long long)types.u4byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u4byte));
+ sprintf(str, "0x%llx", (unsigned long long)types.u8byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u8byte));
+
+ /* Decimal versions */
+ sprintf(str, "%u", types.u1byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u1byte));
+ sprintf(str, "%d", types.s1byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.s1byte));
+ sprintf(str, "%u", types.u2byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u2byte));
+ sprintf(str, "%d", types.s2byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.s2byte));
+ sprintf(str, "%u", types.u4byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u4byte));
+ sprintf(str, "%d", types.s4byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.s4byte));
+ sprintf(str, "%llu", (unsigned long long)types.u8byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.u8byte));
+ sprintf(str, "%lld", (long long)types.s8byte);
+ ok1(strlen(str) < STR_MAX_CHARS(types.s8byte));
+
+ /* Pointer version. */
+ sprintf(str, "%p", types.ptr);
+ ok1(strlen(str) < STR_MAX_CHARS(types.ptr));
+
+ return exit_status();
+}
diff --git a/ccan/str/test/run.c b/ccan/str/test/run.c
new file mode 100644
index 0000000..0f00ea3
--- /dev/null
+++ b/ccan/str/test/run.c
@@ -0,0 +1,106 @@
+#include <ccan/str/str.h>
+#include <ccan/str/str.c>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ccan/tap/tap.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+
+static const char *substrings[] = { "far", "bar", "baz", "b", "ba", "z", "ar",
+ NULL };
+
+#define NUM_SUBSTRINGS (ARRAY_SIZE(substrings) - 1)
+
+static char *strdup_rev(const char *s)
+{
+ char *ret = strdup(s);
+ unsigned int i;
+
+ for (i = 0; i < strlen(s); i++)
+ ret[i] = s[strlen(s) - i - 1];
+ return ret;
+}
+
+int main(int argc, char *argv[])
+{
+ unsigned int i, j, n;
+ char *strings[NUM_SUBSTRINGS * NUM_SUBSTRINGS];
+
+ n = 0;
+ for (i = 0; i < NUM_SUBSTRINGS; i++) {
+ for (j = 0; j < NUM_SUBSTRINGS; j++) {
+ strings[n] = malloc(strlen(substrings[i])
+ + strlen(substrings[j]) + 1);
+ sprintf(strings[n++], "%s%s",
+ substrings[i], substrings[j]);
+ }
+ }
+
+ plan_tests(n * n * 5 + 16);
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < n; j++) {
+ unsigned int k, identical = 0;
+ char *reva, *revb;
+
+ /* Find first difference. */
+ for (k = 0; strings[i][k]==strings[j][k]; k++) {
+ if (k == strlen(strings[i])) {
+ identical = 1;
+ break;
+ }
+ }
+
+ if (identical)
+ ok1(streq(strings[i], strings[j]));
+ else
+ ok1(!streq(strings[i], strings[j]));
+
+ /* Postfix test should be equivalent to prefix
+ * test on reversed string. */
+ reva = strdup_rev(strings[i]);
+ revb = strdup_rev(strings[j]);
+
+ if (!strings[i][k]) {
+ ok1(strstarts(strings[j], strings[i]));
+ ok1(strends(revb, reva));
+ } else {
+ ok1(!strstarts(strings[j], strings[i]));
+ ok1(!strends(revb, reva));
+ }
+ if (!strings[j][k]) {
+ ok1(strstarts(strings[i], strings[j]));
+ ok1(strends(reva, revb));
+ } else {
+ ok1(!strstarts(strings[i], strings[j]));
+ ok1(!strends(reva, revb));
+ }
+ free(reva);
+ free(revb);
+ }
+ }
+
+ for (i = 0; i < n; i++)
+ free(strings[i]);
+
+ ok1(streq(stringify(NUM_SUBSTRINGS),
+ "((sizeof(substrings) / sizeof(substrings[0])) - 1)"));
+ ok1(streq(stringify(ARRAY_SIZE(substrings)),
+ "(sizeof(substrings) / sizeof(substrings[0]))"));
+ ok1(streq(stringify(i == 0), "i == 0"));
+
+ ok1(strcount("aaaaaa", "b") == 0);
+ ok1(strcount("aaaaaa", "a") == 6);
+ ok1(strcount("aaaaaa", "aa") == 3);
+ ok1(strcount("aaaaaa", "aaa") == 2);
+ ok1(strcount("aaaaaa", "aaaa") == 1);
+ ok1(strcount("aaaaaa", "aaaaa") == 1);
+ ok1(strcount("aaaaaa", "aaaaaa") == 1);
+ ok1(strcount("aaa aaa", "b") == 0);
+ ok1(strcount("aaa aaa", "a") == 6);
+ ok1(strcount("aaa aaa", "aa") == 2);
+ ok1(strcount("aaa aaa", "aaa") == 2);
+ ok1(strcount("aaa aaa", "aaaa") == 0);
+ ok1(strcount("aaa aaa", "aaaaa") == 0);
+
+ return exit_status();
+}
diff --git a/core/Makefile.inc b/core/Makefile.inc
new file mode 100644
index 0000000..843ce05
--- /dev/null
+++ b/core/Makefile.inc
@@ -0,0 +1,12 @@
+# -*-Makefile-*-
+
+SUBDIRS += core
+CORE_OBJS = relocate.o console.o backtrace.o init.o chip.o mem_region.o
+CORE_OBJS += malloc.o lock.o cpu.o utils.o fdt.o opal.o interrupts.o
+CORE_OBJS += timebase.o opal-msg.o pci.o pci-opal.o fast-reboot.o
+CORE_OBJS += device.o exceptions.o trace.o affinity.o vpd.o
+CORE_OBJS += hostservices.o platform.o nvram.o flash-nvram.o
+CORE=core/built-in.o
+
+$(CORE): $(CORE_OBJS:%=core/%)
+
diff --git a/core/affinity.c b/core/affinity.c
new file mode 100644
index 0000000..d5eea82
--- /dev/null
+++ b/core/affinity.c
@@ -0,0 +1,132 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ *
+ * We currently construct our associativity properties as such:
+ *
+ * - For "chip" devices (bridges, memory, ...), 4 entries:
+ *
+ * - CCM node ID
+ * - HW card ID
+ * - HW module ID
+ * - Chip ID
+ *
+ * The information is constructed based on the chip ID which (unlike
+ * pHyp) is our HW chip ID (aka "XSCOM" chip ID). We use it to retrieve
+ * the other properties from the corresponding chip/xscom node in the
+ * device-tree. If those properties are absent, 0 is used.
+ *
+ * - For "core" devices, we add a 5th entry:
+ *
+ * - Core ID
+ *
+ * Here too, we do not use the "cooked" HW processor ID from HDAT but
+ * instead use the real HW core ID, which is basically the interrupt
+ * server number of thread 0 on that core.
+ *
+ *
+ * The ibm,associativity-reference-points property is currently set to
+ * 4,4 indicating that the chip ID is our only reference point. This
+ * should be extended to encompass the node IDs eventually.
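+ *
+ * Illustrative resulting properties (cell values assumed):
+ *
+ *	ibm,associativity = <4 node card module chip>;       (chip device)
+ *	ibm,associativity = <5 node card module chip core>;  (core)
+ *
+ * where the leading cell gives the number of entries that follow.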
+ */
+#include <skiboot.h>
+#include <opal.h>
+#include <device.h>
+#include <console.h>
+#include <trace.h>
+#include <chip.h>
+#include <cpu.h>
+#include <affinity.h>
+
+static uint32_t get_chip_node_id(struct proc_chip *chip)
+{
+ /* If the xscom node has an ibm,ccm-node-id property, use it */
+ if (dt_has_node_property(chip->devnode, "ibm,ccm-node-id", NULL))
+ return dt_prop_get_u32(chip->devnode, "ibm,ccm-node-id");
+
+ /*
+ * Else use the 3 top bits of the chip ID which should be
+ * the node on both P7 and P8
+ */
+ return chip->id >> 3;
+}
+
+void add_associativity_ref_point(void)
+{
+ int ref2 = 0x4;
+
+ /*
+ * Note about our use of reference points:
+ *
+ * Linux currently supports two levels of NUMA. We use the first
+ * reference point for the node ID and the second reference point
+ * for a second level of affinity. We always use the chip ID (4)
+ * for the first reference point.
+ *
+ * Choosing the second level of affinity is model specific
+ * unfortunately. Current POWER8E models should use the DCM
+ * as a second level of NUMA.
+ *
+ * If there is a way to obtain this information from the FSP
+ * that would be ideal, but for now hardwire our POWER8E setting.
+ */
+ if (PVR_TYPE(mfspr(SPR_PVR)) == PVR_TYPE_P8E)
+ ref2 = 0x3;
+
+ dt_add_property_cells(opal_node, "ibm,associativity-reference-points",
+ 0x4, ref2);
+}
+
+void add_chip_dev_associativity(struct dt_node *dev)
+{
+ uint32_t chip_id = dt_get_chip_id(dev);
+ struct proc_chip *chip = get_chip(chip_id);
+ uint32_t hw_cid, hw_mid;
+
+ if (!chip)
+ return;
+
+ hw_cid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-card-id", 0);
+ hw_mid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-module-id", 0);
+
+ dt_add_property_cells(dev, "ibm,associativity", 4,
+ get_chip_node_id(chip),
+ hw_cid, hw_mid, chip_id);
+}
+
+void add_core_associativity(struct cpu_thread *cpu)
+{
+ struct proc_chip *chip = get_chip(cpu->chip_id);
+ uint32_t hw_cid, hw_mid, core_id;
+
+ if (!chip)
+ return;
+
+ if (proc_gen == proc_gen_p7)
+ core_id = (cpu->pir >> 2) & 0x7;
+ else if (proc_gen == proc_gen_p8)
+ core_id = (cpu->pir >> 3) & 0xf;
+ else
+ return;
+
+ hw_cid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-card-id", 0);
+ hw_mid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-module-id", 0);
+
+ dt_add_property_cells(cpu->node, "ibm,associativity", 5,
+ get_chip_node_id(chip),
+ hw_cid, hw_mid, chip->id, core_id);
+}
diff --git a/core/backtrace.c b/core/backtrace.c
new file mode 100644
index 0000000..3439db0
--- /dev/null
+++ b/core/backtrace.c
@@ -0,0 +1,41 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <processor.h>
+#include <cpu.h>
+
+void backtrace(void)
+{
+ unsigned int pir = mfspr(SPR_PIR);
+ unsigned long *sp;
+ unsigned long *bottom, *top;
+
+ /* Check if there's a __builtin_something instead */
+ asm("mr %0,1" : "=r" (sp));
+
+ bottom = cpu_stack_bottom(pir);
+ top = cpu_stack_top(pir);
+
+ /* XXX Handle SMP */
+ fprintf(stderr, "CPU %08x Backtrace:\n", pir);
+	/* Walk the stack frames: in the 64-bit PowerPC ELF ABI, sp[0]
+	 * holds the back chain and sp[2] the caller's saved LR.
+	 */
+	while(sp > bottom && sp < top) {
+ fprintf(stderr, " S: %016lx R: %016lx\n",
+ (unsigned long)sp, sp[2]);
+ sp = (unsigned long *)sp[0];
+ }
+}
diff --git a/core/chip.c b/core/chip.c
new file mode 100644
index 0000000..e6eb81c
--- /dev/null
+++ b/core/chip.c
@@ -0,0 +1,85 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <chip.h>
+#include <device.h>
+
+static struct proc_chip *chips[MAX_CHIPS];
+
+uint32_t pir_to_chip_id(uint32_t pir)
+{
+ if (proc_gen == proc_gen_p8)
+ return P8_PIR2GCID(pir);
+ else
+ return P7_PIR2GCID(pir);
+}
+
+uint32_t pir_to_core_id(uint32_t pir)
+{
+ if (proc_gen == proc_gen_p8)
+ return P8_PIR2COREID(pir);
+ else
+ return P7_PIR2COREID(pir);
+}
+
+uint32_t pir_to_thread_id(uint32_t pir)
+{
+ if (proc_gen == proc_gen_p8)
+ return P8_PIR2THREADID(pir);
+ else
+ return P7_PIR2THREADID(pir);
+}
+
+struct proc_chip *next_chip(struct proc_chip *chip)
+{
+ unsigned int i;
+
+ for (i = chip ? (chip->id + 1) : 0; i < MAX_CHIPS; i++)
+ if (chips[i])
+ return chips[i];
+ return NULL;
+}
+
+
+struct proc_chip *get_chip(uint32_t chip_id)
+{
+ return chips[chip_id];
+}
+
+void init_chips(void)
+{
+ struct proc_chip *chip;
+ struct dt_node *xn;
+
+ /* We walk the chips based on xscom nodes in the tree */
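+	/*
+	 * Illustrative shape of the nodes matched here (unit address and
+	 * property values assumed):
+	 *
+	 *	xscom@3fc0000000000 {
+	 *		compatible = "ibm,xscom";
+	 *		ibm,chip-id = <0x0>;
+	 *		ibm,dbob-id = <0x0>;
+	 *	};
+	 */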
+ dt_for_each_compatible(dt_root, xn, "ibm,xscom") {
+ uint32_t id = dt_get_chip_id(xn);
+
+ assert(id < MAX_CHIPS);
+
+ chip = zalloc(sizeof(struct proc_chip));
+ assert(chip);
+ chip->id = id;
+ chip->devnode = xn;
+ chips[id] = chip;
+ chip->dbob_id = dt_prop_get_u32_def(xn, "ibm,dbob-id",
+ 0xffffffff);
+ chip->pcid = dt_prop_get_u32_def(xn, "ibm,proc-chip-id",
+ 0xffffffff);
+	}
+}
diff --git a/core/console.c b/core/console.c
new file mode 100644
index 0000000..b291b1b
--- /dev/null
+++ b/core/console.c
@@ -0,0 +1,334 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Console IO routines for use by libc
+ *
+ * fd is the classic POSIX 0,1,2 (stdin, stdout, stderr)
+ */
+#include <skiboot.h>
+#include <unistd.h>
+#include <console.h>
+#include <opal.h>
+#include <device.h>
+#include <processor.h>
+#include <cpu.h>
+
+static char *con_buf = (char *)INMEM_CON_START;
+static size_t con_in;
+static size_t con_out;
+static bool con_wrapped;
+static struct con_ops *con_driver;
+
+struct lock con_lock = LOCK_UNLOCKED;
+
+/* This is mapped via TCEs so we keep it alone in a page */
+struct memcons memcons __section(".data.memcons") = {
+ .magic = MEMCONS_MAGIC,
+ .obuf_phys = INMEM_CON_START,
+ .ibuf_phys = INMEM_CON_START + INMEM_CON_OUT_LEN,
+ .obuf_size = INMEM_CON_OUT_LEN,
+ .ibuf_size = INMEM_CON_IN_LEN,
+};
+
+bool dummy_console_enabled(void)
+{
+#ifdef FORCE_DUMMY_CONSOLE
+ return true;
+#else
+ return dt_has_node_property(dt_chosen,
+ "sapphire,enable-dummy-console", NULL);
+#endif
+}
+
+void force_dummy_console(void)
+{
+ dt_add_property(dt_chosen, "sapphire,enable-dummy-console", NULL, 0);
+}
+
+#ifdef MAMBO_CONSOLE
+static void mambo_write(const char *buf, size_t count)
+{
+#define SIM_WRITE_CONSOLE_CODE 0
+ register int c asm("r3") = 0; /* SIM_WRITE_CONSOLE_CODE */
+ register unsigned long a1 asm("r4") = (unsigned long)buf;
+ register unsigned long a2 asm("r5") = count;
+ register unsigned long a3 asm("r6") = 0;
+ asm volatile (".long 0x000eaeb0":"=r" (c):"r"(c), "r"(a1), "r"(a2),
+ "r"(a3));
+}
+#else
+static void mambo_write(const char *buf __unused, size_t count __unused) { }
+#endif /* MAMBO_CONSOLE */
+
+void clear_console(void)
+{
+ memset(con_buf, 0, INMEM_CON_LEN);
+}
+
+/*
+ * Flush the console buffer into the driver, returns true
+ * if there is more to go
+ */
+bool __flush_console(void)
+{
+ struct cpu_thread *cpu = this_cpu();
+ size_t req, len = 0;
+ static bool in_flush, more_flush;
+
+ /* Is there anything to flush ? Bail out early if not */
+ if (con_in == con_out || !con_driver)
+ return false;
+
+ /*
+ * Console flushing is suspended on this CPU, typically because
+	 * some critical locks are held that would potentially cause a
+	 * flush to deadlock
+ */
+ if (cpu->con_suspend) {
+ cpu->con_need_flush = true;
+ return false;
+ }
+ cpu->con_need_flush = false;
+
+ /*
+ * We must call the underlying driver with the console lock
+ * dropped otherwise we get some deadlocks if anything down
+ * that path tries to printf() something.
+ *
+ * So instead what we do is we keep a static in_flush flag
+ * set/released with the lock held, which is used to prevent
+ * concurrent attempts at flushing the same chunk of buffer
+ * by other processors.
+ */
+ if (in_flush) {
+ more_flush = true;
+ return false;
+ }
+ in_flush = true;
+
+ do {
+ more_flush = false;
+ if (con_out > con_in) {
+ req = INMEM_CON_OUT_LEN - con_out;
+ unlock(&con_lock);
+ len = con_driver->write(con_buf + con_out, req);
+ lock(&con_lock);
+ con_out = (con_out + len) % INMEM_CON_OUT_LEN;
+ if (len < req)
+ goto bail;
+ }
+ if (con_out < con_in) {
+ unlock(&con_lock);
+ len = con_driver->write(con_buf + con_out,
+ con_in - con_out);
+ lock(&con_lock);
+ con_out = (con_out + len) % INMEM_CON_OUT_LEN;
+ }
+ } while(more_flush);
+bail:
+ in_flush = false;
+ return con_out != con_in;
+}
+
+bool flush_console(void)
+{
+ bool ret;
+
+ lock(&con_lock);
+ ret = __flush_console();
+ unlock(&con_lock);
+
+ return ret;
+}
+
+static void inmem_write(char c)
+{
+ uint32_t opos;
+
+ if (!c)
+ return;
+ con_buf[con_in++] = c;
+ if (con_in >= INMEM_CON_OUT_LEN) {
+ con_in = 0;
+ con_wrapped = true;
+ }
+
+ /*
+ * We must always re-generate memcons.out_pos because
+ * under some circumstances, the console script will
+ * use a broken putmemproc that does RMW on the full
+ * 8 bytes containing out_pos and in_prod, thus corrupting
+ * out_pos
+ */
+ opos = con_in;
+ if (con_wrapped)
+ opos |= MEMCONS_OUT_POS_WRAP;
+ lwsync();
+ memcons.out_pos = opos;
+
+ /* If head reaches tail, push tail around & drop chars */
+ if (con_in == con_out)
+ con_out = (con_in + 1) % INMEM_CON_OUT_LEN;
+}
+
+static size_t inmem_read(char *buf, size_t req)
+{
+ size_t read = 0;
+ char *ibuf = (char *)memcons.ibuf_phys;
+
+ while (req && memcons.in_prod != memcons.in_cons) {
+ *(buf++) = ibuf[memcons.in_cons];
+ lwsync();
+ memcons.in_cons = (memcons.in_cons + 1) % INMEM_CON_IN_LEN;
+ req--;
+ read++;
+ }
+ return read;
+}
+
+static void write_char(char c)
+{
+ mambo_write(&c, 1);
+ inmem_write(c);
+}
+
+ssize_t write(int fd __unused, const void *buf, size_t count)
+{
+	/* We use recursive locking here as we can get called
+	 * from fairly deep debug paths
+	 */
+	bool need_unlock = lock_recursive(&con_lock);
+	const char *cbuf = buf;
+	size_t todo = count;
+
+	while(todo--) {
+		char c = *(cbuf++);
+		if (c == '\n')
+			write_char('\r');
+		write_char(c);
+	}
+
+	__flush_console();
+
+	if (need_unlock)
+		unlock(&con_lock);
+
+	/* Iterate on a copy above: decrementing count itself would have
+	 * left it wrapped to (size_t)-1, making us return -1 on success.
+	 */
+	return count;
+}
+
+ssize_t read(int fd __unused, void *buf, size_t req_count)
+{
+ bool need_unlock = lock_recursive(&con_lock);
+ size_t count = 0;
+
+ if (con_driver && con_driver->read)
+ count = con_driver->read(buf, req_count);
+ if (!count)
+ count = inmem_read(buf, req_count);
+ if (need_unlock)
+ unlock(&con_lock);
+ return count;
+}
+
+void set_console(struct con_ops *driver)
+{
+ con_driver = driver;
+ if (driver)
+ flush_console();
+}
+
+void memcons_add_properties(void)
+{
+ uint64_t addr = (u64)&memcons;
+
+ dt_add_property_cells(opal_node, "ibm,opal-memcons",
+ hi32(addr), lo32(addr));
+}
+
+/*
+ * Default OPAL console provided if nothing else overrides it
+ */
+static int64_t dummy_console_write(int64_t term_number, int64_t *length,
+ const uint8_t *buffer)
+{
+ if (term_number != 0)
+ return OPAL_PARAMETER;
+ write(0, buffer, *length);
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_CONSOLE_WRITE, dummy_console_write, 3);
+
+static int64_t dummy_console_write_buffer_space(int64_t term_number,
+ int64_t *length)
+{
+ if (term_number != 0)
+ return OPAL_PARAMETER;
+ if (length)
+ *length = INMEM_CON_OUT_LEN;
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_CONSOLE_WRITE_BUFFER_SPACE, dummy_console_write_buffer_space, 2);
+
+static int64_t dummy_console_read(int64_t term_number, int64_t *length,
+ uint8_t *buffer)
+{
+ if (term_number != 0)
+ return OPAL_PARAMETER;
+ *length = read(0, buffer, *length);
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_CONSOLE_READ, dummy_console_read, 3);
+
+static void dummy_console_poll(void *data __unused)
+{
+ bool uart_has_data;
+
+ lock(&con_lock);
+ uart_has_data = uart_console_poll();
+
+ if (uart_has_data || memcons.in_prod != memcons.in_cons)
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT,
+ OPAL_EVENT_CONSOLE_INPUT);
+ else
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0);
+ unlock(&con_lock);
+}
+
+void dummy_console_add_nodes(void)
+{
+ struct dt_node *con, *consoles;
+
+ consoles = dt_new(opal_node, "consoles");
+ assert(consoles);
+ dt_add_property_cells(consoles, "#address-cells", 1);
+ dt_add_property_cells(consoles, "#size-cells", 0);
+
+ con = dt_new_addr(consoles, "serial", 0);
+ assert(con);
+ dt_add_property_string(con, "compatible", "ibm,opal-console-raw");
+ dt_add_property_cells(con, "#write-buffer-size", INMEM_CON_OUT_LEN);
+ dt_add_property_cells(con, "reg", 0);
+ dt_add_property_string(con, "device_type", "serial");
+
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@0");
+
+ opal_add_poller(dummy_console_poll, NULL);
+}
diff --git a/core/cpu.c b/core/cpu.c
new file mode 100644
index 0000000..0eea946
--- /dev/null
+++ b/core/cpu.c
@@ -0,0 +1,672 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * TODO: Index array by PIR to be able to catch them easily
+ * from assembly such as machine checks etc...
+ */
+#include <skiboot.h>
+#include <cpu.h>
+#include <fsp.h>
+#include <device.h>
+#include <opal.h>
+#include <stack.h>
+#include <trace.h>
+#include <affinity.h>
+#include <chip.h>
+#include <timebase.h>
+#include <ccan/str/str.h>
+#include <ccan/container_of/container_of.h>
+
+/* The cpu_stacks array is static and indexed by PIR in
+ * order to speed up lookup from asm entry points
+ */
+struct cpu_stack {
+ union {
+ uint8_t stack[STACK_SIZE];
+ struct cpu_thread cpu;
+ };
+} __align(STACK_SIZE);
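+
+/*
+ * Illustrative consequence of the union above (a sketch, mirroring the
+ * lookups done elsewhere in this file): one STACK_SIZE-aligned block
+ * per PIR holds both the thread state and its stack, so both are
+ * recoverable from the PIR alone:
+ *
+ *	struct cpu_thread *t = &cpu_stacks[pir].cpu;
+ *	void *top_of_block = (void *)&cpu_stacks[pir] + STACK_SIZE;
+ */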
+
+static struct cpu_stack *cpu_stacks = (struct cpu_stack *)CPU_STACKS_BASE;
+unsigned int cpu_thread_count;
+unsigned int cpu_max_pir;
+struct cpu_thread *boot_cpu;
+static struct lock reinit_lock = LOCK_UNLOCKED;
+
+unsigned long cpu_secondary_start __force_data = 0;
+
+struct cpu_job {
+ struct list_node link;
+ void (*func)(void *data);
+ void *data;
+ bool complete;
+ bool no_return;
+};
+
+/* attribute const as cpu_stacks is constant. */
+void __attrconst *cpu_stack_bottom(unsigned int pir)
+{
+ return (void *)&cpu_stacks[pir] + sizeof(struct cpu_thread);
+}
+
+void __attrconst *cpu_stack_top(unsigned int pir)
+{
+ /* This is the top of the MC stack which is above the normal
+ * stack, which means a SP between cpu_stack_bottom() and
+ * cpu_stack_top() can either be a normal stack pointer or
+ * a Machine Check stack pointer
+ */
+ return (void *)&cpu_stacks[pir] + STACK_SIZE - STACK_TOP_GAP;
+}
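+
+/*
+ * Illustrative per-PIR stack block layout assumed by the two helpers
+ * above (higher addresses at the top):
+ *
+ *	base + STACK_SIZE                   end of the block
+ *	base + STACK_SIZE - STACK_TOP_GAP   cpu_stack_top(): MC stack top
+ *	        ... normal stack grows downward ...
+ *	base + sizeof(struct cpu_thread)    cpu_stack_bottom()
+ *	base                                struct cpu_thread itself
+ */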
+
+struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
+ void (*func)(void *data), void *data,
+ bool no_return)
+{
+ struct cpu_job *job;
+
+ if (!cpu_is_available(cpu)) {
+ prerror("CPU: Tried to queue job on unavailable CPU 0x%04x\n",
+ cpu->pir);
+ return NULL;
+ }
+
+ job = zalloc(sizeof(struct cpu_job));
+ if (!job)
+ return NULL;
+ job->func = func;
+ job->data = data;
+ job->complete = false;
+ job->no_return = no_return;
+
+ if (cpu != this_cpu()) {
+ lock(&cpu->job_lock);
+ list_add_tail(&cpu->job_queue, &job->link);
+ unlock(&cpu->job_lock);
+ } else {
+ func(data);
+ job->complete = true;
+ }
+
+ /* XXX Add poking of CPU with interrupt */
+
+ return job;
+}
+
+bool cpu_poll_job(struct cpu_job *job)
+{
+ lwsync();
+ return job->complete;
+}
+
+void cpu_wait_job(struct cpu_job *job, bool free_it)
+{
+ if (!job)
+ return;
+
+ while(!job->complete) {
+ /* Handle mbox if master CPU */
+ if (this_cpu() == boot_cpu)
+ fsp_poll();
+ else
+ smt_low();
+ lwsync();
+ }
+ lwsync();
+ smt_medium();
+
+ if (free_it)
+ free(job);
+}
+
+void cpu_free_job(struct cpu_job *job)
+{
+ if (!job)
+ return;
+
+ assert(job->complete);
+ free(job);
+}
+
+void cpu_process_jobs(void)
+{
+ struct cpu_thread *cpu = this_cpu();
+ struct cpu_job *job;
+ void (*func)(void *);
+ void *data;
+
+ sync();
+ if (list_empty(&cpu->job_queue))
+ return;
+
+ lock(&cpu->job_lock);
+ while (true) {
+ bool no_return;
+
+ if (list_empty(&cpu->job_queue))
+ break;
+ smt_medium();
+ job = list_pop(&cpu->job_queue, struct cpu_job, link);
+ if (!job)
+ break;
+ func = job->func;
+ data = job->data;
+ no_return = job->no_return;
+ unlock(&cpu->job_lock);
+ if (no_return)
+ free(job);
+ func(data);
+ lock(&cpu->job_lock);
+ if (!no_return) {
+ lwsync();
+ job->complete = true;
+ }
+ }
+ unlock(&cpu->job_lock);
+}
+
+struct dt_node *get_cpu_node(u32 pir)
+{
+ struct cpu_thread *t = find_cpu_by_pir(pir);
+
+ return t ? t->node : NULL;
+}
+
+/* This only covers primary, active cpus */
+struct cpu_thread *find_cpu_by_chip_id(u32 chip_id)
+{
+ struct cpu_thread *t;
+
+ for_each_available_cpu(t) {
+ if (t->is_secondary)
+ continue;
+ if (t->chip_id == chip_id)
+ return t;
+ }
+ return NULL;
+}
+
+struct cpu_thread *find_cpu_by_node(struct dt_node *cpu)
+{
+ struct cpu_thread *t;
+
+ for_each_available_cpu(t) {
+ if (t->node == cpu)
+ return t;
+ }
+ return NULL;
+}
+
+struct cpu_thread *find_cpu_by_pir(u32 pir)
+{
+ if (pir > cpu_max_pir)
+ return NULL;
+ return &cpu_stacks[pir].cpu;
+}
+
+struct cpu_thread *find_cpu_by_server(u32 server_no)
+{
+ struct cpu_thread *t;
+
+ for_each_cpu(t) {
+ if (t->server_no == server_no)
+ return t;
+ }
+ return NULL;
+}
+
+struct cpu_thread *next_cpu(struct cpu_thread *cpu)
+{
+	struct cpu_stack *s;
+	unsigned int index;
+
+	if (cpu == NULL)
+		index = 0;
+	else {
+		/* Only derive the enclosing stack block for a real thread:
+		 * container_of() on NULL would be undefined.
+		 */
+		s = container_of(cpu, struct cpu_stack, cpu);
+		index = s - cpu_stacks + 1;
+	}
+ for (; index <= cpu_max_pir; index++) {
+ cpu = &cpu_stacks[index].cpu;
+ if (cpu->state != cpu_state_no_cpu)
+ return cpu;
+ }
+ return NULL;
+}
+
+struct cpu_thread *first_cpu(void)
+{
+ return next_cpu(NULL);
+}
+
+struct cpu_thread *next_available_cpu(struct cpu_thread *cpu)
+{
+ do {
+ cpu = next_cpu(cpu);
+ } while(cpu && !cpu_is_available(cpu));
+
+ return cpu;
+}
+
+struct cpu_thread *first_available_cpu(void)
+{
+ return next_available_cpu(NULL);
+}
+
+struct cpu_thread *next_available_core_in_chip(struct cpu_thread *core,
+ u32 chip_id)
+{
+ do {
+ core = next_cpu(core);
+ } while(core && (!cpu_is_available(core) ||
+ core->chip_id != chip_id ||
+ core->is_secondary));
+ return core;
+}
+
+struct cpu_thread *first_available_core_in_chip(u32 chip_id)
+{
+ return next_available_core_in_chip(NULL, chip_id);
+}
+
+uint32_t cpu_get_core_index(struct cpu_thread *cpu)
+{
+ return pir_to_core_id(cpu->pir);
+}
+
+void cpu_remove_node(const struct cpu_thread *t)
+{
+ struct dt_node *i;
+
+ /* Find this cpu node */
+ dt_for_each_node(dt_root, i) {
+ const struct dt_property *p;
+
+ if (!dt_has_node_property(i, "device_type", "cpu"))
+ continue;
+ p = dt_find_property(i, "ibm,pir");
+ if (dt_property_get_cell(p, 0) == t->pir) {
+ dt_free(i);
+ return;
+ }
+ }
+ prerror("CPU: Could not find cpu node %i to remove!\n", t->pir);
+ abort();
+}
+
+void cpu_disable_all_threads(struct cpu_thread *cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i <= cpu_max_pir; i++) {
+ struct cpu_thread *t = &cpu_stacks[i].cpu;
+
+ if (t->primary == cpu->primary)
+ t->state = cpu_state_disabled;
+ }
+
+ /* XXX Do something to actually stop the core */
+}
+
+static void init_cpu_thread(struct cpu_thread *t,
+ enum cpu_thread_state state,
+ unsigned int pir)
+{
+ init_lock(&t->job_lock);
+ list_head_init(&t->job_queue);
+ t->state = state;
+ t->pir = pir;
+ assert(pir == container_of(t, struct cpu_stack, cpu) - cpu_stacks);
+}
+
+void pre_init_boot_cpu(void)
+{
+ struct cpu_thread *cpu = this_cpu();
+
+ memset(cpu, 0, sizeof(struct cpu_thread));
+}
+
+void init_boot_cpu(void)
+{
+ unsigned int i, pir, pvr;
+
+ pir = mfspr(SPR_PIR);
+ pvr = mfspr(SPR_PVR);
+
+ /* Get a CPU thread count and an initial max PIR based on PVR */
+ switch(PVR_TYPE(pvr)) {
+ case PVR_TYPE_P7:
+ case PVR_TYPE_P7P:
+ cpu_thread_count = 4;
+ cpu_max_pir = SPR_PIR_P7_MASK;
+ proc_gen = proc_gen_p7;
+ printf("CPU: P7 generation processor\n");
+ break;
+ case PVR_TYPE_P8E:
+ case PVR_TYPE_P8:
+ cpu_thread_count = 8;
+ cpu_max_pir = SPR_PIR_P8_MASK;
+ proc_gen = proc_gen_p8;
+ printf("CPU: P8 generation processor\n");
+ break;
+ default:
+ prerror("CPU: Unknown PVR, assuming 1 thread\n");
+ cpu_thread_count = 1;
+ cpu_max_pir = mfspr(SPR_PIR);
+ proc_gen = proc_gen_unknown;
+ }
+
+ printf("CPU: Boot CPU PIR is 0x%04x PVR is 0x%08x\n", pir, pvr);
+ printf("CPU: Initial max PIR set to 0x%x\n", cpu_max_pir);
+ printf("CPU: Assuming max %d threads per core\n", cpu_thread_count);
+
+ /* Clear the CPU structs */
+ for (i = 0; i <= cpu_max_pir; i++)
+ memset(&cpu_stacks[i].cpu, 0, sizeof(struct cpu_thread));
+
+ /* Setup boot CPU state */
+ boot_cpu = &cpu_stacks[pir].cpu;
+ init_cpu_thread(boot_cpu, cpu_state_active, pir);
+ init_boot_tracebuf(boot_cpu);
+ assert(this_cpu() == boot_cpu);
+}
+
+void init_all_cpus(void)
+{
+ struct dt_node *cpus, *cpu;
+ unsigned int thread, new_max_pir = 0;
+
+ cpus = dt_find_by_path(dt_root, "/cpus");
+ assert(cpus);
+
+ /* Iterate all CPUs in the device-tree */
+ dt_for_each_child(cpus, cpu) {
+ unsigned int pir, server_no, chip_id;
+ enum cpu_thread_state state;
+ const struct dt_property *p;
+ struct cpu_thread *t, *pt;
+
+ /* Skip cache nodes */
+ if (strcmp(dt_prop_get(cpu, "device_type"), "cpu"))
+ continue;
+
+ server_no = dt_prop_get_u32(cpu, "reg");
+
+ /* If PIR property is absent, assume it's the same as the
+ * server number
+ */
+ pir = dt_prop_get_u32_def(cpu, "ibm,pir", server_no);
+
+ /* We should always have an ibm,chip-id property */
+ chip_id = dt_get_chip_id(cpu);
+
+ /* Only use operational CPUs */
+ if (!strcmp(dt_prop_get(cpu, "status"), "okay"))
+ state = cpu_state_present;
+ else
+ state = cpu_state_unavailable;
+
+ printf("CPU: CPU from DT PIR=0x%04x Server#=0x%x State=%d\n",
+ pir, server_no, state);
+
+ /* Setup thread 0 */
+ t = pt = &cpu_stacks[pir].cpu;
+ if (t != boot_cpu) {
+ init_cpu_thread(t, state, pir);
+ /* Each cpu gets its own later in init_trace_buffers */
+ t->trace = boot_cpu->trace;
+ }
+ t->server_no = server_no;
+ t->primary = t;
+ t->node = cpu;
+ t->chip_id = chip_id;
+ t->icp_regs = 0; /* Will be set later */
+
+ /* Add associativity properties */
+ add_core_associativity(t);
+
+ /* Adjust max PIR */
+ if (new_max_pir < (pir + cpu_thread_count - 1))
+ new_max_pir = pir + cpu_thread_count - 1;
+
+ /* Iterate threads */
+ p = dt_find_property(cpu, "ibm,ppc-interrupt-server#s");
+ if (!p)
+ continue;
+ for (thread = 1; thread < (p->len / 4); thread++) {
+ printf("CPU: secondary thread %d found\n", thread);
+ t = &cpu_stacks[pir + thread].cpu;
+ init_cpu_thread(t, state, pir + thread);
+ t->trace = boot_cpu->trace;
+ t->server_no = ((const u32 *)p->prop)[thread];
+ t->is_secondary = true;
+ t->primary = pt;
+ t->node = cpu;
+ t->chip_id = chip_id;
+ }
+ }
+ cpu_max_pir = new_max_pir;
+ printf("CPU: New max PIR set to 0x%x\n", new_max_pir);
+}
+
+void cpu_bringup(void)
+{
+ struct cpu_thread *t;
+
+ printf("CPU: Setting up secondary CPU state\n");
+
+ op_display(OP_LOG, OP_MOD_CPU, 0x0000);
+
+ /* Tell everybody to chime in ! */
+ printf("CPU: Calling in all processors...\n");
+ cpu_secondary_start = 1;
+ sync();
+
+ op_display(OP_LOG, OP_MOD_CPU, 0x0002);
+
+ for_each_cpu(t) {
+ if (t->state != cpu_state_present &&
+ t->state != cpu_state_active)
+ continue;
+
+ /* Add a callin timeout ? If so, call cpu_remove_node(t). */
+ while (t->state != cpu_state_active) {
+ smt_very_low();
+ sync();
+ }
+ smt_medium();
+ }
+
+ printf("CPU: All processors called in...\n");
+
+ op_display(OP_LOG, OP_MOD_CPU, 0x0003);
+}
+
+void cpu_callin(struct cpu_thread *cpu)
+{
+ cpu->state = cpu_state_active;
+}
+
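+/*
+ * For reference, the secondary-thread side of the cpu_bringup()
+ * handshake above is roughly (illustrative sketch only; the real loop
+ * lives in the secondary entry path, not here):
+ *
+ *	while (!cpu_secondary_start)
+ *		smt_very_low();
+ *	sync();
+ *	cpu_callin(this_cpu());		(state -> cpu_state_active)
+ *	... then poll for queued jobs until the OS starts the thread
+ */
+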
+static void opal_start_thread_job(void *data)
+{
+ cpu_give_self_os();
+
+ /* We do not return, so let's mark the job as
+ * complete
+ */
+ start_kernel_secondary((uint64_t)data);
+}
+
+static int64_t opal_start_cpu_thread(uint64_t server_no, uint64_t start_address)
+{
+ struct cpu_thread *cpu;
+ struct cpu_job *job;
+
+ cpu = find_cpu_by_server(server_no);
+ if (!cpu) {
+ prerror("OPAL: Start invalid CPU 0x%04llx !\n", server_no);
+ return OPAL_PARAMETER;
+ }
+ printf("OPAL: Start CPU 0x%04llx (PIR 0x%04x) -> 0x%016llx\n",
+ server_no, cpu->pir, start_address);
+
+ lock(&reinit_lock);
+ if (!cpu_is_available(cpu)) {
+ unlock(&reinit_lock);
+ prerror("OPAL: CPU not active in OPAL !\n");
+ return OPAL_WRONG_STATE;
+ }
+ job = __cpu_queue_job(cpu, opal_start_thread_job, (void *)start_address,
+ true);
+ unlock(&reinit_lock);
+ if (!job) {
+ prerror("OPAL: Failed to create CPU start job !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_START_CPU, opal_start_cpu_thread, 2);
+
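+/*
+ * Illustrative only: the OS reaches the function above through the
+ * OPAL_START_CPU token, e.g. (hypothetical wrapper name):
+ *
+ *	rc = opal_start_cpu(server_no, (uint64_t)secondary_entry);
+ *
+ * where server_no matches one of the "ibm,ppc-interrupt-server#s"
+ * values discovered in init_all_cpus().
+ */
+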
+static int64_t opal_query_cpu_status(uint64_t server_no, uint8_t *thread_status)
+{
+ struct cpu_thread *cpu;
+
+ cpu = find_cpu_by_server(server_no);
+ if (!cpu) {
+ prerror("OPAL: Query invalid CPU 0x%04llx !\n", server_no);
+ return OPAL_PARAMETER;
+ }
+ if (!cpu_is_available(cpu) && cpu->state != cpu_state_os) {
+ prerror("OPAL: CPU not active in OPAL nor OS !\n");
+ return OPAL_PARAMETER;
+ }
+ switch(cpu->state) {
+ case cpu_state_os:
+ *thread_status = OPAL_THREAD_STARTED;
+ break;
+ case cpu_state_active:
+ /* Active in skiboot -> inactive in OS */
+ *thread_status = OPAL_THREAD_INACTIVE;
+ break;
+ default:
+ *thread_status = OPAL_THREAD_UNAVAILABLE;
+ }
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_QUERY_CPU_STATUS, opal_query_cpu_status, 2);
+
+static int64_t opal_return_cpu(void)
+{
+ printf("OPAL: Returning CPU 0x%04x\n", this_cpu()->pir);
+
+ __secondary_cpu_entry();
+
+ return OPAL_HARDWARE; /* Should not happen */
+}
+opal_call(OPAL_RETURN_CPU, opal_return_cpu, 0);
+
+static void cpu_change_hile(void *hilep)
+{
+ bool hile = *(bool *)hilep;
+ unsigned long hid0;
+
+ hid0 = mfspr(SPR_HID0);
+ if (hile)
+ hid0 |= SPR_HID0_HILE;
+ else
+ hid0 &= ~SPR_HID0_HILE;
+ printf("CPU: [%08x] HID0 set to 0x%016lx\n", this_cpu()->pir, hid0);
+ set_hid0(hid0);
+
+ this_cpu()->current_hile = hile;
+}
+
+static int64_t cpu_change_all_hile(bool hile)
+{
+ struct cpu_thread *cpu;
+
+ printf("CPU: Switching HILE on all CPUs to %d\n", hile);
+
+ for_each_available_cpu(cpu) {
+ if (cpu->current_hile == hile)
+ continue;
+ if (cpu == this_cpu()) {
+ cpu_change_hile(&hile);
+ continue;
+ }
+ cpu_wait_job(cpu_queue_job(cpu, cpu_change_hile, &hile), true);
+ }
+ return OPAL_SUCCESS;
+}
+
+static int64_t opal_reinit_cpus(uint64_t flags)
+{
+ struct cpu_thread *cpu;
+ int64_t rc = OPAL_SUCCESS;
+ int i;
+
+ lock(&reinit_lock);
+
+ prerror("OPAL: Trying a CPU re-init with flags: 0x%llx\n", flags);
+
+ for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu)) {
+ if (cpu == this_cpu())
+ continue;
+ if (cpu->state == cpu_state_os) {
+ /*
+			 * This might be a race with OPAL_RETURN_CPU during
+			 * kexec, where the CPU is still on its way back to
+			 * OPAL: wait a bit and try again
+ */
+ for (i = 0; (i < 3) && (cpu->state == cpu_state_os); i++)
+ time_wait_ms(1);
+ if (cpu->state == cpu_state_os) {
+ prerror("OPAL: CPU 0x%x not in OPAL !\n", cpu->pir);
+ rc = OPAL_WRONG_STATE;
+ goto bail;
+ }
+ }
+ }
+ /*
+ * Now we need to mark ourselves "active" or we'll be skipped
+ * by the various "for_each_active_..." calls done by slw_reinit()
+ */
+ this_cpu()->state = cpu_state_active;
+
+ /*
+ * If the flags affect endianness and we are on P8 DD2 or later, then
+ * use the HID bit. We use the PVR (we could use the EC level in
+ * the chip but the PVR is more readily available).
+ */
+ if (proc_gen == proc_gen_p8 && PVR_VERS_MAJ(mfspr(SPR_PVR)) >= 2 &&
+ (flags & (OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE))) {
+ bool hile = !!(flags & OPAL_REINIT_CPUS_HILE_LE);
+
+ flags &= ~(OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE);
+ rc = cpu_change_all_hile(hile);
+ }
+
+ /* Any flags left ? */
+ if (flags != 0)
+ rc = slw_reinit(flags);
+
+ /* And undo the above */
+ this_cpu()->state = cpu_state_os;
+
+bail:
+ unlock(&reinit_lock);
+ return rc;
+}
+opal_call(OPAL_REINIT_CPUS, opal_reinit_cpus, 1);
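+
+/*
+ * Illustrative only (hypothetical snippet): a little-endian OS would
+ * typically flip interrupt endianness for all CPUs early in boot with
+ * something like
+ *
+ *	opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
+ *
+ * which lands in opal_reinit_cpus() above and, on P8 DD2 or later,
+ * becomes a cpu_change_all_hile() call.
+ */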
diff --git a/core/device.c b/core/device.c
new file mode 100644
index 0000000..28cccb7
--- /dev/null
+++ b/core/device.c
@@ -0,0 +1,791 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <device.h>
+#include <stdlib.h>
+#include <skiboot.h>
+#include <libfdt/libfdt.h>
+#include <libfdt/libfdt_internal.h>
+#include <ccan/str/str.h>
+#include <ccan/endian/endian.h>
+
+/* Used to give unique handles. */
+u32 last_phandle = 0;
+
+struct dt_node *dt_root;
+struct dt_node *dt_chosen;
+
+static const char *take_name(const char *name)
+{
+ if (!is_rodata(name) && !(name = strdup(name))) {
+		prerror("Failed to allocate copy of name\n");
+ abort();
+ }
+ return name;
+}
+
+static void free_name(const char *name)
+{
+ if (!is_rodata(name))
+ free((char *)name);
+}
+
+static struct dt_node *new_node(const char *name)
+{
+ struct dt_node *node = malloc(sizeof *node);
+ if (!node) {
+ prerror("Failed to allocate node\n");
+ abort();
+ }
+
+ node->name = take_name(name);
+ node->parent = NULL;
+ list_head_init(&node->properties);
+ list_head_init(&node->children);
+ /* FIXME: locking? */
+ node->phandle = ++last_phandle;
+ return node;
+}
+
+struct dt_node *dt_new_root(const char *name)
+{
+ return new_node(name);
+}
+
+bool dt_attach_root(struct dt_node *parent, struct dt_node *root)
+{
+ struct dt_node *node;
+
+ /* Look for duplicates */
+
+ assert(!root->parent);
+ dt_for_each_child(parent, node) {
+ if (!strcmp(node->name, root->name)) {
+ prerror("DT: %s failed, duplicate %s\n",
+ __func__, root->name);
+ return false;
+ }
+ }
+ list_add_tail(&parent->children, &root->list);
+ root->parent = parent;
+
+ return true;
+}
+
+struct dt_node *dt_new(struct dt_node *parent, const char *name)
+{
+ struct dt_node *new;
+ assert(parent);
+
+ new = new_node(name);
+ if (!dt_attach_root(parent, new)) {
+ free_name(new->name);
+ free(new);
+ return NULL;
+ }
+ return new;
+}
+
+struct dt_node *dt_new_addr(struct dt_node *parent, const char *name,
+ uint64_t addr)
+{
+ char *lname;
+ struct dt_node *new;
+ size_t len;
+
+ assert(parent);
+ len = strlen(name) + STR_MAX_CHARS(addr) + 2;
+ lname = malloc(len);
+ if (!lname)
+ return NULL;
+ snprintf(lname, len, "%s@%llx", name, (long long)addr);
+ new = new_node(lname);
+ free(lname);
+ if (!dt_attach_root(parent, new)) {
+ free_name(new->name);
+ free(new);
+ return NULL;
+ }
+ return new;
+}
+
+struct dt_node *dt_new_2addr(struct dt_node *parent, const char *name,
+ uint64_t addr0, uint64_t addr1)
+{
+ char *lname;
+ struct dt_node *new;
+ size_t len;
+ assert(parent);
+
+ len = strlen(name) + 2*STR_MAX_CHARS(addr0) + 3;
+ lname = malloc(len);
+ if (!lname)
+ return NULL;
+ snprintf(lname, len, "%s@%llx,%llx",
+ name, (long long)addr0, (long long)addr1);
+ new = new_node(lname);
+ free(lname);
+ if (!dt_attach_root(parent, new)) {
+ free_name(new->name);
+ free(new);
+ return NULL;
+ }
+ return new;
+}
+
+char *dt_get_path(const struct dt_node *node)
+{
+ unsigned int len = 0;
+ const struct dt_node *n;
+ char *path, *p;
+
+ /* Dealing with NULL is for test/debug purposes */
+ if (!node)
+ return strdup("<NULL>");
+
+ for (n = node; n; n = n->parent) {
+ len += strlen(n->name);
+ if (n->parent || n == node)
+ len++;
+ }
+ path = zalloc(len + 1);
+ assert(path);
+ p = path + len;
+ for (n = node; n; n = n->parent) {
+ len = strlen(n->name);
+ p -= len;
+ memcpy(p, n->name, len);
+ if (n->parent || n == node)
+ *(--p) = '/';
+ }
+ assert(p == path);
+
+ return p;
+}
+
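+/*
+ * Worked example (for illustration): for a node "bar" whose parent is
+ * "foo" directly under the root (whose own name is empty), the first
+ * loop above counts 4 bytes for each of "bar" and "foo" (name plus one
+ * '/'), the buffer is zalloc'ed with room for the NUL, and the second
+ * loop fills it backwards, yielding "/foo/bar".
+ */
+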
+static const char *__dt_path_split(const char *p,
+ const char **namep, unsigned int *namel,
+ const char **addrp, unsigned int *addrl)
+{
+ const char *at, *sl;
+
+ *namel = *addrl = 0;
+
+ /* Skip initial '/' */
+ while (*p == '/')
+ p++;
+
+ /* Check empty path */
+ if (*p == 0)
+ return p;
+
+ at = strchr(p, '@');
+ sl = strchr(p, '/');
+ if (sl == NULL)
+ sl = p + strlen(p);
+ if (sl < at)
+ at = NULL;
+ if (at) {
+ *addrp = at + 1;
+ *addrl = sl - at - 1;
+ }
+ *namep = p;
+ *namel = at ? (at - p) : (sl - p);
+
+ return sl;
+}
+
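+/*
+ * Example (illustrative): called on "/memory@0/whatever", the function
+ * above skips the leading '/', returns name "memory" (namel = 6) and
+ * address "0" (addrl = 1), and its return value points at the
+ * "/whatever" remainder.
+ */
+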
+struct dt_node *dt_find_by_path(struct dt_node *root, const char *path)
+{
+ struct dt_node *n;
+ const char *pn, *pa, *p = path, *nn, *na;
+ unsigned int pnl, pal, nnl, nal;
+ bool match;
+
+ /* Walk path components */
+ while (*p) {
+ /* Extract next path component */
+ p = __dt_path_split(p, &pn, &pnl, &pa, &pal);
+ if (pnl == 0 && pal == 0)
+ break;
+
+ /* Compare with each child node */
+ match = false;
+ list_for_each(&root->children, n, list) {
+ match = true;
+ __dt_path_split(n->name, &nn, &nnl, &na, &nal);
+ if (pnl && (pnl != nnl || strncmp(pn, nn, pnl)))
+ match = false;
+ if (pal && (pal != nal || strncmp(pa, na, pal)))
+ match = false;
+ if (match) {
+ root = n;
+ break;
+ }
+ }
+
+ /* No child match */
+ if (!match)
+ return NULL;
+ }
+ return root;
+}
+
+struct dt_node *dt_find_by_phandle(struct dt_node *root, u32 phandle)
+{
+ struct dt_node *node;
+
+ dt_for_each_node(root, node)
+ if (node->phandle == phandle)
+ return node;
+ return NULL;
+}
+
+static struct dt_property *new_property(struct dt_node *node,
+ const char *name, size_t size)
+{
+ struct dt_property *p = malloc(sizeof(*p) + size);
+ if (!p) {
+ prerror("Failed to allocate property \"%s\" for %s of %zu bytes\n",
+ name, dt_get_path(node), size);
+ abort();
+ }
+ if (dt_find_property(node, name)) {
+ prerror("Duplicate property \"%s\" in node %s\n",
+ name, dt_get_path(node));
+		abort();
+	}
+
+ p->name = take_name(name);
+ p->len = size;
+ list_add_tail(&node->properties, &p->list);
+ return p;
+}
+
+struct dt_property *dt_add_property(struct dt_node *node,
+ const char *name,
+ const void *val, size_t size)
+{
+ struct dt_property *p;
+
+ /*
+ * Filter out phandle properties, we re-generate them
+ * when flattening
+ */
+ if (strcmp(name, "linux,phandle") == 0 ||
+ strcmp(name, "phandle") == 0) {
+ assert(size == 4);
+ node->phandle = *(const u32 *)val;
+ if (node->phandle >= last_phandle)
+ last_phandle = node->phandle;
+ return NULL;
+ }
+
+ p = new_property(node, name, size);
+ if (size)
+ memcpy(p->prop, val, size);
+ return p;
+}
+
+void dt_resize_property(struct dt_property **prop, size_t len)
+{
+ size_t new_len = sizeof(**prop) + len;
+
+ *prop = realloc(*prop, new_len);
+
+ /* Fix up linked lists in case we moved. (note: not an empty list). */
+ (*prop)->list.next->prev = &(*prop)->list;
+ (*prop)->list.prev->next = &(*prop)->list;
+}
+
+struct dt_property *dt_add_property_string(struct dt_node *node,
+ const char *name,
+ const char *value)
+{
+ return dt_add_property(node, name, value, strlen(value)+1);
+}
+
+struct dt_property *dt_add_property_nstr(struct dt_node *node,
+ const char *name,
+ const char *value, unsigned int vlen)
+{
+ struct dt_property *p;
+ char *tmp = zalloc(vlen + 1);
+
+ strncpy(tmp, value, vlen);
+ p = dt_add_property(node, name, tmp, strlen(tmp)+1);
+ free(tmp);
+
+ return p;
+}
+
+struct dt_property *__dt_add_property_cells(struct dt_node *node,
+ const char *name,
+ int count, ...)
+{
+ struct dt_property *p;
+ u32 *val;
+ unsigned int i;
+ va_list args;
+
+ p = new_property(node, name, count * sizeof(u32));
+ val = (u32 *)p->prop;
+ va_start(args, count);
+ for (i = 0; i < count; i++)
+ val[i] = cpu_to_fdt32(va_arg(args, u32));
+ va_end(args);
+ return p;
+}
+
+struct dt_property *__dt_add_property_u64s(struct dt_node *node,
+ const char *name,
+ int count, ...)
+{
+ struct dt_property *p;
+ u64 *val;
+ unsigned int i;
+ va_list args;
+
+ p = new_property(node, name, count * sizeof(u64));
+ val = (u64 *)p->prop;
+ va_start(args, count);
+ for (i = 0; i < count; i++)
+ val[i] = cpu_to_fdt64(va_arg(args, u64));
+ va_end(args);
+ return p;
+}
+
+struct dt_property *__dt_add_property_strings(struct dt_node *node,
+ const char *name,
+ int count, ...)
+{
+ struct dt_property *p;
+ unsigned int i, size;
+ va_list args;
+ const char *sstr;
+ char *s;
+
+ va_start(args, count);
+ for (i = size = 0; i < count; i++) {
+ sstr = va_arg(args, const char *);
+ if (sstr)
+ size += strlen(sstr) + 1;
+ }
+ va_end(args);
+ if (!size)
+ size = 1;
+ p = new_property(node, name, size);
+ s = (char *)p->prop;
+ *s = 0;
+ va_start(args, count);
+ for (i = 0; i < count; i++) {
+ sstr = va_arg(args, const char *);
+ if (sstr) {
+ strcpy(s, sstr);
+ s = s + strlen(sstr) + 1;
+ }
+ }
+ va_end(args);
+ return p;
+}
+
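+/*
+ * Illustrative usage (assuming the usual variadic wrapper macros from
+ * device.h that supply the argument count):
+ *
+ *	dt_add_property_cells(node, "reg", 0x3fc00, 0x1000);
+ *	dt_add_property_strings(node, "compatible",
+ *				"ibm,power8-xscom", "ibm,xscom");
+ *
+ * Cells are stored big-endian via cpu_to_fdt32(), strings back to back
+ * including their NUL terminators.
+ */
+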
+void dt_del_property(struct dt_node *node, struct dt_property *prop)
+{
+ list_del_from(&node->properties, &prop->list);
+ free_name(prop->name);
+ free(prop);
+}
+
+u32 dt_property_get_cell(const struct dt_property *prop, u32 index)
+{
+ assert(prop->len >= (index+1)*sizeof(u32));
+ /* Always aligned, so this works. */
+ return fdt32_to_cpu(((const u32 *)prop->prop)[index]);
+}
+
+/* First child of this node. */
+struct dt_node *dt_first(const struct dt_node *root)
+{
+ return list_top(&root->children, struct dt_node, list);
+}
+
+/* Return next node, or NULL. */
+struct dt_node *dt_next(const struct dt_node *root,
+ const struct dt_node *prev)
+{
+ /* Children? */
+ if (!list_empty(&prev->children))
+ return dt_first(prev);
+
+ do {
+ /* More siblings? */
+ if (prev->list.next != &prev->parent->children.n)
+ return list_entry(prev->list.next, struct dt_node,list);
+
+ /* No more siblings, move up to parent. */
+ prev = prev->parent;
+ } while (prev != root);
+
+ return NULL;
+}
+
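+/*
+ * Traversal order example (illustrative): for root -> { a -> { a1 }, b },
+ * successive dt_next(root, ...) calls visit a, then a1 (child first),
+ * then b (sibling found after walking back up from a1), and return
+ * NULL once we climb all the way back to root.
+ */
+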
+struct dt_property *__dt_find_property(struct dt_node *node, const char *name)
+{
+ struct dt_property *i;
+
+ list_for_each(&node->properties, i, list)
+ if (strcmp(i->name, name) == 0)
+ return i;
+ return NULL;
+}
+
+const struct dt_property *dt_find_property(const struct dt_node *node,
+ const char *name)
+{
+ const struct dt_property *i;
+
+ list_for_each(&node->properties, i, list)
+ if (strcmp(i->name, name) == 0)
+ return i;
+ return NULL;
+}
+
+const struct dt_property *dt_require_property(const struct dt_node *node,
+ const char *name, int wanted_len)
+{
+ const struct dt_property *p = dt_find_property(node, name);
+
+ if (!p) {
+ const char *path = dt_get_path(node);
+
+ prerror("DT: Missing required property %s/%s\n",
+ path, name);
+ assert(false);
+ }
+ if (wanted_len >= 0 && p->len != wanted_len) {
+ const char *path = dt_get_path(node);
+
+ prerror("DT: Unexpected property length %s/%s\n",
+ path, name);
+ prerror("DT: Expected len: %d got len: %zu\n",
+ wanted_len, p->len);
+ assert(false);
+ }
+
+ return p;
+}
+
+bool dt_has_node_property(const struct dt_node *node,
+ const char *name, const char *val)
+{
+ const struct dt_property *p = dt_find_property(node, name);
+
+ if (!p)
+ return false;
+ if (!val)
+ return true;
+
+ return p->len == strlen(val) + 1 && memcmp(p->prop, val, p->len) == 0;
+}
+
+bool dt_prop_find_string(const struct dt_property *p, const char *s)
+{
+ const char *c, *end;
+
+ if (!p)
+ return false;
+ c = p->prop;
+ end = c + p->len;
+
+ while(c < end) {
+ if (!strcasecmp(s, c))
+ return true;
+ c += strlen(c) + 1;
+ }
+ return false;
+}
+
+bool dt_node_is_compatible(const struct dt_node *node, const char *compat)
+{
+ const struct dt_property *p = dt_find_property(node, "compatible");
+
+ return dt_prop_find_string(p, compat);
+}
+
+struct dt_node *dt_find_compatible_node(struct dt_node *root,
+ struct dt_node *prev,
+ const char *compat)
+{
+ struct dt_node *node;
+
+ node = prev ? dt_next(root, prev) : root;
+ for (; node; node = dt_next(root, node))
+ if (dt_node_is_compatible(node, compat))
+ return node;
+ return NULL;
+}
+
+u64 dt_prop_get_u64(const struct dt_node *node, const char *prop)
+{
+ const struct dt_property *p = dt_require_property(node, prop, 8);
+
+ return ((u64)dt_property_get_cell(p, 0) << 32)
+ | dt_property_get_cell(p, 1);
+}
+
+u64 dt_prop_get_u64_def(const struct dt_node *node, const char *prop, u64 def)
+{
+ const struct dt_property *p = dt_find_property(node, prop);
+
+ if (!p)
+ return def;
+
+ return ((u64)dt_property_get_cell(p, 0) << 32)
+ | dt_property_get_cell(p, 1);
+}
+
+u32 dt_prop_get_u32(const struct dt_node *node, const char *prop)
+{
+ const struct dt_property *p = dt_require_property(node, prop, 4);
+
+ return dt_property_get_cell(p, 0);
+}
+
+u32 dt_prop_get_u32_def(const struct dt_node *node, const char *prop, u32 def)
+{
+ const struct dt_property *p = dt_find_property(node, prop);
+
+ if (!p)
+ return def;
+
+ return dt_property_get_cell(p, 0);
+}
+
+const void *dt_prop_get(const struct dt_node *node, const char *prop)
+{
+ const struct dt_property *p = dt_require_property(node, prop, -1);
+
+ return p->prop;
+}
+
+const void *dt_prop_get_def(const struct dt_node *node, const char *prop,
+ void *def)
+{
+ const struct dt_property *p = dt_find_property(node, prop);
+
+ return p ? p->prop : def;
+}
+
+const void *dt_prop_get_def_size(const struct dt_node *node, const char *prop,
+ void *def, size_t *len)
+{
+ const struct dt_property *p = dt_find_property(node, prop);
+ *len = 0;
+ if (p)
+ *len = p->len;
+
+ return p ? p->prop : def;
+}
+
+u32 dt_prop_get_cell(const struct dt_node *node, const char *prop, u32 cell)
+{
+ const struct dt_property *p = dt_require_property(node, prop, -1);
+
+ return dt_property_get_cell(p, cell);
+}
+
+u32 dt_prop_get_cell_def(const struct dt_node *node, const char *prop,
+ u32 cell, u32 def)
+{
+ const struct dt_property *p = dt_find_property(node, prop);
+
+ if (!p)
+ return def;
+
+ return dt_property_get_cell(p, cell);
+}
+
+void dt_free(struct dt_node *node)
+{
+ struct dt_node *child;
+ struct dt_property *p;
+
+ while ((child = list_top(&node->children, struct dt_node, list)))
+ dt_free(child);
+
+ while ((p = list_pop(&node->properties, struct dt_property, list))) {
+ free_name(p->name);
+ free(p);
+ }
+
+ if (node->parent)
+ list_del_from(&node->parent->children, &node->list);
+ free_name(node->name);
+ free(node);
+}
+
+int dt_expand_node(struct dt_node *node, const void *fdt, int fdt_node)
+{
+ const struct fdt_property *prop;
+ int offset, nextoffset, err;
+ struct dt_node *child;
+ const char *name;
+ uint32_t tag;
+
+ if (((err = fdt_check_header(fdt)) != 0)
+ || ((err = _fdt_check_node_offset(fdt, fdt_node)) < 0)) {
+ prerror("FDT: Error %d parsing node 0x%x\n", err, fdt_node);
+ return -1;
+ }
+
+ nextoffset = err;
+ do {
+ offset = nextoffset;
+
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ switch (tag) {
+ case FDT_PROP:
+ prop = _fdt_offset_ptr(fdt, offset);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ dt_add_property(node, name, prop->data,
+ fdt32_to_cpu(prop->len));
+ break;
+ case FDT_BEGIN_NODE:
+ name = fdt_get_name(fdt, offset, NULL);
+ child = dt_new_root(name);
+ assert(child);
+ nextoffset = dt_expand_node(child, fdt, offset);
+
+ /*
+ * This may fail in case of duplicate, keep it
+ * going for now, we may ultimately want to
+ * assert
+ */
+ (void)dt_attach_root(node, child);
+ break;
+ case FDT_END:
+ return -1;
+ }
+ } while (tag != FDT_END_NODE);
+
+ return nextoffset;
+}
+
+void dt_expand(const void *fdt)
+{
+ printf("FDT: Parsing fdt @%p\n", fdt);
+
+ dt_root = dt_new_root("");
+
+ dt_expand_node(dt_root, fdt, 0);
+}
+
+u64 dt_get_number(const void *pdata, unsigned int cells)
+{
+ const u32 *p = pdata;
+ u64 ret = 0;
+
+ while(cells--)
+ ret = (ret << 32) | be32_to_cpu(*(p++));
+ return ret;
+}
+
+u32 dt_n_address_cells(const struct dt_node *node)
+{
+ if (!node->parent)
+ return 0;
+ return dt_prop_get_u32_def(node->parent, "#address-cells", 2);
+}
+
+u32 dt_n_size_cells(const struct dt_node *node)
+{
+ if (!node->parent)
+ return 0;
+ return dt_prop_get_u32_def(node->parent, "#size-cells", 1);
+}
+
+u64 dt_get_address(const struct dt_node *node, unsigned int index,
+ u64 *out_size)
+{
+ const struct dt_property *p;
+ u32 na = dt_n_address_cells(node);
+ u32 ns = dt_n_size_cells(node);
+ u32 pos, n;
+
+ p = dt_require_property(node, "reg", -1);
+ n = (na + ns) * sizeof(u32);
+ pos = n * index;
+ assert((pos + n) <= p->len);
+ if (out_size)
+ *out_size = dt_get_number(p->prop + pos + na * sizeof(u32), ns);
+ return dt_get_number(p->prop + pos, na);
+}
+
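+/*
+ * Example (illustrative): with #address-cells = 2 and #size-cells = 2
+ * in the parent, a reg of <0x3f 0xc0000000 0x0 0x1000> decodes via
+ * dt_get_number() to address 0x3fc0000000 and size 0x1000 for index 0.
+ */
+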
+static u32 __dt_get_chip_id(const struct dt_node *node)
+{
+ const struct dt_property *prop;
+
+ for (; node; node = node->parent) {
+ prop = dt_find_property(node, "ibm,chip-id");
+ if (prop)
+ return dt_property_get_cell(prop, 0);
+ }
+ return 0xffffffff;
+}
+
+u32 dt_get_chip_id(const struct dt_node *node)
+{
+ u32 id = __dt_get_chip_id(node);
+ assert(id != 0xffffffff);
+ return id;
+}
+
+struct dt_node *dt_find_compatible_node_on_chip(struct dt_node *root,
+ struct dt_node *prev,
+ const char *compat,
+ uint32_t chip_id)
+{
+ struct dt_node *node;
+
+ node = prev ? dt_next(root, prev) : root;
+ for (; node; node = dt_next(root, node)) {
+ u32 cid = __dt_get_chip_id(node);
+ if (cid == chip_id &&
+ dt_node_is_compatible(node, compat))
+ return node;
+ }
+ return NULL;
+}
+
+unsigned int dt_count_addresses(const struct dt_node *node)
+{
+ const struct dt_property *p;
+ u32 na = dt_n_address_cells(node);
+ u32 ns = dt_n_size_cells(node);
+ u32 n;
+
+ p = dt_require_property(node, "reg", -1);
+ n = (na + ns) * sizeof(u32);
+ return p->len / n;
+}
+
+u64 dt_translate_address(const struct dt_node *node, unsigned int index,
+ u64 *out_size)
+{
+ /* XXX TODO */
+ return dt_get_address(node, index, out_size);
+}
diff --git a/core/exceptions.c b/core/exceptions.c
new file mode 100644
index 0000000..995ca92
--- /dev/null
+++ b/core/exceptions.c
@@ -0,0 +1,529 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <stack.h>
+#include <opal.h>
+#include <processor.h>
+#include <cpu.h>
+
+static uint64_t client_mc_address;
+
+extern uint8_t exc_primary_start;
+extern uint8_t exc_primary_end;
+
+extern uint32_t exc_primary_patch_branch;
+
+extern uint8_t exc_secondary_start;
+extern uint8_t exc_secondary_end;
+
+extern uint32_t exc_secondary_patch_stack;
+extern uint32_t exc_secondary_patch_mfsrr0;
+extern uint32_t exc_secondary_patch_mfsrr1;
+extern uint32_t exc_secondary_patch_type;
+extern uint32_t exc_secondary_patch_mtsrr0;
+extern uint32_t exc_secondary_patch_mtsrr1;
+extern uint32_t exc_secondary_patch_rfid;
+
+struct lock hmi_lock = LOCK_UNLOCKED;
+
+#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
+
+#define SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43,45))
+#define SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45))
+#define SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
+
+#define DSISR_MC_UE (PPC_BIT(48))
+#define DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
+#define DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
+#define DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
+#define DSISR_MC_TLB_MULTIHIT_MFSLB (PPC_BIT(55))
+#define DSISR_MC_TLB_MULTIHIT (PPC_BIT(53) | PPC_BIT(55))
+#define DSISR_MC_SLB_MULTIHIT (PPC_BIT(56))
+#define DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57))
+
+static void mce_set_ierror(struct opal_machine_check_event *mce, uint64_t srr1)
+{
+ switch (SRR1_MC_IFETCH(srr1)) {
+ case SRR1_MC_IFETCH_SLB_PARITY:
+ mce->error_type = OpalMCE_ERROR_TYPE_SLB;
+ mce->u.slb_error.slb_error_type = OpalMCE_SLB_ERROR_PARITY;
+ break;
+
+ case SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce->error_type = OpalMCE_ERROR_TYPE_SLB;
+ mce->u.slb_error.slb_error_type = OpalMCE_SLB_ERROR_MULTIHIT;
+ break;
+
+ case SRR1_MC_IFETCH_SLB_BOTH:
+ mce->error_type = OpalMCE_ERROR_TYPE_SLB;
+ mce->u.slb_error.slb_error_type =
+ OpalMCE_SLB_ERROR_INDETERMINATE;
+ break;
+
+ case SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce->error_type = OpalMCE_ERROR_TYPE_TLB;
+ mce->u.tlb_error.tlb_error_type = OpalMCE_TLB_ERROR_MULTIHIT;
+ break;
+
+ case SRR1_MC_IFETCH_UE:
+ case SRR1_MC_IFETCH_UE_IFU_INTERNAL:
+ mce->error_type = OpalMCE_ERROR_TYPE_UE;
+ mce->u.ue_error.ue_error_type = OpalMCE_UE_ERROR_IFETCH;
+ break;
+
+ case SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce->error_type = OpalMCE_ERROR_TYPE_UE;
+ mce->u.ue_error.ue_error_type =
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ }
+
+}
+
+static void mce_set_derror(struct opal_machine_check_event *mce, uint64_t dsisr)
+{
+ if (dsisr & DSISR_MC_UE) {
+ mce->error_type = OpalMCE_ERROR_TYPE_UE;
+ mce->u.ue_error.ue_error_type = OpalMCE_UE_ERROR_LOAD_STORE;
+
+ } else if (dsisr & DSISR_MC_UE_TABLEWALK) {
+ mce->error_type = OpalMCE_ERROR_TYPE_UE;
+ mce->u.ue_error.ue_error_type =
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+
+ } else if (dsisr & DSISR_MC_ERAT_MULTIHIT) {
+ mce->error_type = OpalMCE_ERROR_TYPE_ERAT;
+ mce->u.erat_error.erat_error_type =
+ OpalMCE_ERAT_ERROR_MULTIHIT;
+
+ } else if (dsisr & DSISR_MC_TLB_MULTIHIT) {
+ mce->error_type = OpalMCE_ERROR_TYPE_TLB;
+ mce->u.tlb_error.tlb_error_type =
+ OpalMCE_TLB_ERROR_MULTIHIT;
+
+ } else if (dsisr & DSISR_MC_SLB_MULTIHIT) {
+ mce->error_type = OpalMCE_ERROR_TYPE_SLB;
+ mce->u.slb_error.slb_error_type =
+ OpalMCE_SLB_ERROR_MULTIHIT;
+
+ } else if (dsisr & DSISR_MC_SLB_MULTIHIT_PARITY) {
+ mce->error_type = OpalMCE_ERROR_TYPE_SLB;
+ mce->u.slb_error.slb_error_type =
+ OpalMCE_SLB_ERROR_INDETERMINATE;
+ }
+}
+
+/* Called from head.S, thus no prototype */
+void handle_machine_check(struct stack_frame *stack);
+
+void handle_machine_check(struct stack_frame *stack)
+{
+ struct opal_machine_check_event *mce;
+ uint64_t srr1, addr;
+
+ mce = &this_cpu()->mc_event;
+
+ /* This will occur if we get another MC between the time that
+ * we re-set MSR_ME, and the OS clears this flag.
+ *
+	 * The alternative would be keeping MSR_ME cleared and letting
+	 * the OS re-set it (after clearing the flag), but then we would
+	 * risk a checkstop, and an opal assert() is the better option.
+ */
+ assert(!mce->in_use);
+
+ mce->in_use = 1;
+
+ /* Populate generic machine check info */
+ mce->version = OpalMCE_V1;
+ mce->srr0 = stack->srr0;
+ mce->srr1 = stack->srr1;
+ mce->gpr3 = stack->gpr[3];
+
+ mce->initiator = OpalMCE_INITIATOR_CPU;
+ mce->disposition = OpalMCE_DISPOSITION_NOT_RECOVERED;
+ mce->severity = OpalMCE_SEV_ERROR_SYNC;
+
+ srr1 = stack->srr1;
+
+	/* Populate the mce error_type and type-specific error_type from
+	 * either SRR1 or DSISR, depending on whether this was a load/store
+	 * or an ifetch exception */
+	if (SRR1_MC_LOADSTORE(srr1)) {
+		mce_set_derror(mce, mfspr(SPR_DSISR));
+		addr = mfspr(SPR_DAR);
+	} else {
+		mce_set_ierror(mce, srr1);
+		addr = stack->srr0;
+	}
+
+ if (mce->error_type == OpalMCE_ERROR_TYPE_TLB) {
+ mce->u.tlb_error.effective_address_provided = true;
+ mce->u.tlb_error.effective_address = addr;
+
+ } else if (mce->error_type == OpalMCE_ERROR_TYPE_SLB) {
+ mce->u.slb_error.effective_address_provided = true;
+ mce->u.slb_error.effective_address = addr;
+
+ } else if (mce->error_type == OpalMCE_ERROR_TYPE_ERAT) {
+ mce->u.erat_error.effective_address_provided = true;
+ mce->u.erat_error.effective_address = addr;
+
+ } else if (mce->error_type == OpalMCE_ERROR_TYPE_UE) {
+ mce->u.ue_error.effective_address_provided = true;
+ mce->u.ue_error.effective_address = addr;
+ }
+
+ /* Setup stack to rfi into the OS' handler, with ME re-enabled. */
+ stack->gpr[3] = (uint64_t)mce;
+ stack->srr0 = client_mc_address;
+ stack->srr1 = mfmsr() | MSR_ME;
+}
+
+#define REG "%016llx"
+#define REGS_PER_LINE 4
+#define LAST_VOLATILE 13
+
+static void dump_regs(struct stack_frame *stack, uint64_t hmer)
+{
+ int i;
+ uint64_t tfmr;
+
+ if (hmer & SPR_HMER_MALFUNCTION_ALERT)
+		printf("HMI: Malfunction alert\n");
+ if (hmer & SPR_HMER_HYP_RESOURCE_ERR)
+ printf("HMI: Hypervisor resource error.\n");
+ if (hmer & SPR_HMER_TFAC_ERROR) {
+ tfmr = mfspr(SPR_TFMR);
+ printf("HMI: TFAC error: SPRN_TFMR = 0x%016llx\n", tfmr);
+ }
+ if (hmer & SPR_HMER_TFMR_PARITY_ERROR) {
+ tfmr = mfspr(SPR_TFMR);
+ printf("HMI: TFMR parity error: SPRN_TFMR = 0x%016llx\n", tfmr);
+ }
+ printf("TRAP: %04llx\n", stack->type);
+ printf("SRR0: "REG" SRR1: "REG"\n", stack->srr0, stack->srr1);
+ printf("CFAR: "REG" LR: "REG" CTR: "REG"\n",
+ stack->cfar, stack->lr, stack->ctr);
+ printf(" CR: %08x XER: %08x\n", stack->cr, stack->xer);
+
+ for (i = 0; i < 32; i++) {
+ if ((i % REGS_PER_LINE) == 0)
+ printf("\nGPR%02d: ", i);
+ printf(REG " ", stack->gpr[i]);
+ if (i == LAST_VOLATILE)
+ break;
+ }
+ printf("\n");
+}
+
+/*
+ * HMER register layout:
+ * +===+==========+============================+========+===================+
+ * |Bit|Name |Description |PowerKVM|Action |
+ * | | | |HMI | |
+ * | | | |enabled | |
+ * | | | |for this| |
+ * | | | |bit ? | |
+ * +===+==========+============================+========+===================+
+ * |0 |malfunctio|A processor core in the |Yes |Raise attn from |
+ * | |n_alert |system has checkstopped | |sapphire resulting |
+ * | | |(failed recovery) and has | |in xstop |
+ * | | |requested a CP Sparing | | |
+ * | | |to occur. This is | | |
+ * | | |broadcasted to every | | |
+ * | | |processor in the system | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |1 |Reserved |reserved |n/a | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |2 |proc_recv_|Processor recovery occurred |Yes |Log message and |
+ * | |done |error-bit in fir not masked | |continue working. |
+ * | | |(see bit 11) | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |3 |proc_recv_|Processor went through |Yes |Log message and |
+ * | |error_mask|recovery for an error which | |continue working. |
+ * | |ed |is actually masked for | | |
+ * | | |reporting | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |4 | |Timer facility experienced |Yes |Raise attn from |
+ * | |tfac_error|an error. | |sapphire resulting |
+ * | | |TB, DEC, HDEC, PURR or SPURR| |in xstop |
+ * | | |may be corrupted (details in| | |
+ * | | |TFMR) | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |5 | |TFMR SPR itself is |Yes |Raise attn from |
+ * | |tfmr_parit|corrupted. | |sapphire resulting |
+ * | |y_error |Entire timing facility may | |in xstop |
+ * | | |be compromised. | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |6 |ha_overflo| UPS (Uninterrupted Power |No |N/A |
+ * | |w_warning |System) overflow indication | | |
+ * | | |that the UPS | | |
+ * | | |DirtyAddrTable has | | |
+ * | | |reached a limit where it | | |
+ * | | |requires PHYP unload support| | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |7 |reserved |reserved |n/a |n/a |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |8 |xscom_fail|An XSCOM operation caused by|No |We handle it by |
+ * | | |a cache inhibited load/store| |manually reading |
+ * | | |from this thread failed. A | |HMER register. |
+ * | | |trap register is | | |
+ * | | |available. | | |
+ * | | | | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |9 |xscom_done|An XSCOM operation caused by|No |We handle it by |
+ * | | |a cache inhibited load/store| |manually reading |
+ * | | |from this thread completed. | |HMER register. |
+ * | | |If hypervisor | | |
+ * | | |intends to use this bit, it | | |
+ * | | |is responsible for clearing | | |
+ * | | |it before performing the | | |
+ * | | |xscom operation. | | |
+ * | | |NOTE: this bit should always| | |
+ * | | |be masked in HMEER | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |10 |reserved |reserved |n/a |n/a |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |11 |proc_recv_|Processor recovery occurred |y |Log message and |
+ * | |again |again before bit2 or bit3 | |continue working. |
+ * | | |was cleared | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |12-|reserved |reserved (was: temperature |n/a |n/a |
+ * |15 | |sensor passed the critical | | |
+ * | | |point on the way up) | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |16 | |SCOM has set a reserved FIR |No |n/a |
+ * | |scom_fir_h|bit to cause recovery | | |
+ * | |m | | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |17 |trig_fir_h|Debug trigger has set a |No |n/a |
+ * | |mi |reserved FIR bit to cause | | |
+ * | | |recovery | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |18 |reserved |reserved |n/a |n/a |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |19 |reserved |reserved |n/a |n/a |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |20 |hyp_resour|A hypervisor resource error |y |Raise attn from |
+ * | |ce_err |occurred: data parity error | |sapphire resulting |
+ * | | |on SPRC0:3, SPR_Modereg or | |in xstop. |
+ * | | |HMEER. | | |
+ * | | |Note: this bit will cause a | | |
+ * | | |checkstop when (HV=1, PR=0 | | |
+ * | | |and EE=0) | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |21-| |if bit 8 is active, the |No |We handle it by |
+ * |23 |xscom_stat|reason will be detailed in | |manually reading |
+ * | |us |these bits. See chapter 11.1| |HMER register. |
+ * | | |These bits are informational| | |
+ * | | |only and always masked | | |
+ * | | |(mask = '0') | | |
+ * | | |If hypervisor intends to use| | |
+ * | | |this bit, it is responsible | | |
+ * | | |for clearing it before | | |
+ * | | |performing the xscom | | |
+ * | | |operation. | | |
+ * |---+----------+----------------------------+--------+-------------------|
+ * |24-|Not |Not implemented |n/a |n/a |
+ * |63 |implemente| | | |
+ * | |d | | | |
+ * +-- +----------+----------------------------+--------+-------------------+
+ *
+ * The above HMER bits can be enabled/disabled by modifying the
+ * SPR_HMEER_HMI_ENABLE_MASK #define in include/processor.h.
+ * If you modify support for any of the bits listed above, please make sure
+ * you change the above table to reflect that.
+ *
+ * NOTE: Per Dave Larson, never enable 8,9,21-23
+ */
+
+/* make compiler happy with a prototype */
+void handle_hmi(struct stack_frame *stack);
+
+void handle_hmi(struct stack_frame *stack)
+{
+ uint64_t hmer, orig_hmer;
+ bool assert = false;
+
+ orig_hmer = hmer = mfspr(SPR_HMER);
+ printf("HMI: Received HMI interrupt: HMER = 0x%016llx\n", hmer);
+ if (hmer & (SPR_HMER_PROC_RECV_DONE
+ | SPR_HMER_PROC_RECV_ERROR_MASKED)) {
+ hmer &= ~(SPR_HMER_PROC_RECV_DONE
+ | SPR_HMER_PROC_RECV_ERROR_MASKED);
+		printf("HMI: Processor recovery done.\n");
+ }
+ if (hmer & SPR_HMER_PROC_RECV_AGAIN) {
+ hmer &= ~SPR_HMER_PROC_RECV_AGAIN;
+		printf("HMI: Processor recovery occurred again before "
+		       "bit2 was cleared\n");
+ }
+	/* Assert if we see a malfunction alert, we cannot continue. */
+ if (hmer & SPR_HMER_MALFUNCTION_ALERT) {
+ hmer &= ~SPR_HMER_MALFUNCTION_ALERT;
+ assert = true;
+ }
+
+	/* Assert if we see a hypervisor resource error, we cannot continue. */
+ if (hmer & SPR_HMER_HYP_RESOURCE_ERR) {
+ hmer &= ~SPR_HMER_HYP_RESOURCE_ERR;
+ assert = true;
+ }
+
+ /*
+ * Assert for now for all TOD errors. In future we need to decode
+ * TFMR and take corrective action wherever required.
+ */
+ if (hmer & (SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR)) {
+ hmer &= ~(SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR);
+ assert = true;
+ }
+
+ /*
+ * HMER bits are sticky, once set to 1 they remain set to 1 until
+ * they are set to 0. Reset the error source bit to 0, otherwise
+ * we keep getting HMI interrupt again and again.
+ */
+ mtspr(SPR_HMER, hmer);
+ if (!assert)
+ return;
+
+ /*
+ * Raise attn to crash.
+ *
+ * We get HMI on all threads at the same time. Using locks to avoid
+ * printf messages jumbled up.
+ */
+ lock(&hmi_lock);
+ dump_regs(stack, orig_hmer);
+ /* Should we unlock? We are going down anyway. */
+ unlock(&hmi_lock);
+ assert(false);
+}
+
+/* Called from head.S, thus no prototype */
+void exception_entry(struct stack_frame *stack);
+
+void exception_entry(struct stack_frame *stack)
+{
+ switch(stack->type) {
+ case STACK_ENTRY_MCHECK:
+ handle_machine_check(stack);
+ break;
+	case STACK_ENTRY_HMI:
+		handle_hmi(stack);
+		break;
+ case STACK_ENTRY_SOFTPATCH:
+ /* XXX TODO : Implement softpatch ? */
+ break;
+ }
+}
+
+static int64_t patch_exception(uint64_t vector, uint64_t glue, bool hv)
+{
+ uint64_t iaddr;
+
+ /* Copy over primary exception handler */
+ memcpy((void *)vector, &exc_primary_start,
+ &exc_primary_end - &exc_primary_start);
+
+ /* Patch branch instruction in primary handler */
+ iaddr = vector + exc_primary_patch_branch;
+ *(uint32_t *)iaddr |= (glue - iaddr) & 0x03fffffc;
+
+ /* Copy over secondary exception handler */
+ memcpy((void *)glue, &exc_secondary_start,
+ &exc_secondary_end - &exc_secondary_start);
+
+ /* Patch-in the vector number */
+ *(uint32_t *)(glue + exc_secondary_patch_type) |= vector;
+
+ /*
+ * If machine check, patch GET_STACK to get to the MC stack
+ * instead of the normal stack.
+ *
+	 * To simplify the arithmetic involved we rely on the facts
+	 * that the base of all CPU stacks is 64k aligned
+ * and that our stack size is < 32k, which means that the
+ * "addi" instruction used in GET_STACK() is always using a
+ * small (<32k) positive offset, which we can then easily
+ * fixup with a simple addition
+ */
+ BUILD_ASSERT(STACK_SIZE < 0x8000);
+ BUILD_ASSERT(!(CPU_STACKS_BASE & 0xffff));
+
+ if (vector == 0x200) {
+ /*
+ * The addi we try to patch is the 3rd instruction
+ * of GET_STACK(). If you change the macro, you must
+ * update this code
+ */
+ iaddr = glue + exc_secondary_patch_stack + 8;
+ *(uint32_t *)iaddr += MC_STACK_SIZE;
+ }
+
+ /* Standard exception ? All done */
+ if (!hv)
+ goto flush;
+
+ /* HV exception, change the SRR's to HSRRs and rfid to hrfid
+ *
+ * The magic is that mfspr/mtspr of SRR can be turned into the
+ * equivalent HSRR version by OR'ing 0x4800. For rfid to hrfid
+ * we OR 0x200.
+ */
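+	/*
+	 * (Encoding detail, for reference: the upper five bits of the
+	 * SPR number sit at instruction bits 11-15, so OR'ing in
+	 * 9 << 11 = 0x4800 turns SRR0/1 (SPRs 26/27) into HSRR0/1
+	 * (SPRs 314/315); likewise hrfid's extended opcode differs
+	 * from rfid's by a single bit, 0x200 in the instruction image.)
+	 */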
+ *(uint32_t *)(glue + exc_secondary_patch_mfsrr0) |= 0x4800;
+ *(uint32_t *)(glue + exc_secondary_patch_mfsrr1) |= 0x4800;
+ *(uint32_t *)(glue + exc_secondary_patch_mtsrr0) |= 0x4800;
+ *(uint32_t *)(glue + exc_secondary_patch_mtsrr1) |= 0x4800;
+ *(uint32_t *)(glue + exc_secondary_patch_rfid) |= 0x200;
+
+ flush:
+ /* On P7 and later all we need is : */
+ sync_icache();
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t opal_register_exc_handler(uint64_t opal_exception,
+ uint64_t handler_address,
+ uint64_t glue_cache_line)
+{
+ switch(opal_exception) {
+ case OPAL_MACHINE_CHECK_HANDLER:
+ client_mc_address = handler_address;
+ return patch_exception(0x200, glue_cache_line, false);
+ case OPAL_HYPERVISOR_MAINTENANCE_HANDLER:
+ return patch_exception(0xe60, glue_cache_line, true);
+#if 0 /* We let Linux handle softpatch */
+ case OPAL_SOFTPATCH_HANDLER:
+ return patch_exception(0x1500, glue_cache_line, true);
+#endif
+ default:
+ break;
+ }
+ return OPAL_PARAMETER;
+}
+opal_call(OPAL_REGISTER_OPAL_EXCEPTION_HANDLER, opal_register_exc_handler, 3);
+
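+/*
+ * Illustrative only: the OS invokes the handler registration above via
+ * the OPAL_REGISTER_OPAL_EXCEPTION_HANDLER token, passing the entry
+ * point of its handler and a cache line it owns for the patched glue,
+ * e.g. (hypothetical wrapper name):
+ *
+ *	opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
+ *					(uint64_t)mc_entry,
+ *					(uint64_t)glue_line);
+ */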
diff --git a/core/fast-reboot.c b/core/fast-reboot.c
new file mode 100644
index 0000000..49b80b6
--- /dev/null
+++ b/core/fast-reboot.c
@@ -0,0 +1,346 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <cpu.h>
+#include <fsp.h>
+#include <psi.h>
+#include <opal.h>
+#include <xscom.h>
+#include <interrupts.h>
+#include <cec.h>
+#include <timebase.h>
+#include <memory.h>
+#include <pci.h>
+#include <chip.h>
+
+/*
+ * To get control of all threads, we sreset them via XSCOM after
+ * patching the 0x100 vector. This will work as long as the target
+ * HRMOR is 0. If Linux ever uses HRMOR, we'll have to consider
+ * a messier approach.
+ *
+ * The SCOM register we want is called "Core RAS Control" in the doc
+ * and EX0.EC.PC.TCTL_GENERATE#0.TCTL.DIRECT_CONTROLS in the SCOM list
+ *
+ * Bits in there change from CPU rev to CPU rev but the bit we care
+ * about, bit 60 "sreset_request" appears to have stuck to the same
+ * place in both P7 and P7+. The register also has the same SCOM
+ * address
+ */
+#define EX0_TCTL_DIRECT_CONTROLS0 0x08010400
+#define EX0_TCTL_DIRECT_CONTROLS1 0x08010440
+#define EX0_TCTL_DIRECT_CONTROLS2 0x08010480
+#define EX0_TCTL_DIRECT_CONTROLS3 0x080104c0
+#define TCTL_DC_SRESET_REQUEST PPC_BIT(60)
+
+/* Flag tested by the OPAL entry code */
+uint8_t reboot_in_progress;
+static struct cpu_thread *resettor, *resettee;
+
+static void flush_caches(void)
+{
+ uint64_t base = SKIBOOT_BASE;
+ uint64_t end = base + SKIBOOT_SIZE;
+
+ /* Not sure what the effect of sreset is on cores, so let's
+ * shoot a series of dcbf's on all cachelines that make up
+ * our core memory just in case...
+ */
+ while(base < end) {
+ asm volatile("dcbf 0,%0" : : "r" (base) : "memory");
+ base += 128;
+ }
+ sync();
+}
+
+static bool do_reset_core_p7(struct cpu_thread *cpu)
+{
+ uint32_t xscom_addr, chip;
+ uint64_t ctl;
+ int rc;
+
+	/* Add the Core#: P7 PIR is [core(3 bits) | thread(2 bits)], so
+	 * bits 2-4 select the core, placed at bits 24-26 of the SCOM
+	 * address to pick the EX chiplet
+	 */
+ xscom_addr = EX0_TCTL_DIRECT_CONTROLS0;
+ xscom_addr |= ((cpu->pir >> 2) & 7) << 24;
+
+ chip = pir_to_chip_id(cpu->pir);
+
+	/* Request an sreset on each of the core's 4 threads */
+	ctl = TCTL_DC_SRESET_REQUEST;
+ rc = xscom_write(chip, xscom_addr, ctl);
+ rc |= xscom_write(chip, xscom_addr + 0x40, ctl);
+ rc |= xscom_write(chip, xscom_addr + 0x80, ctl);
+ rc |= xscom_write(chip, xscom_addr + 0xc0, ctl);
+ if (rc) {
+ prerror("RESET: Error %d resetting CPU 0x%04x\n",
+ rc, cpu->pir);
+ return false;
+ }
+ return true;
+}
+
+static void fast_reset_p7(void)
+{
+ struct cpu_thread *cpu;
+
+ resettee = this_cpu();
+ resettor = NULL;
+
+ /* Pick up a candidate resettor. We do that before we flush
+ * the caches
+ */
+ for_each_cpu(cpu) {
+ /*
+ * Some threads might still be in skiboot.
+ *
+ * But because we deal with entire cores and we don't want
+ * to special case things, we are just going to reset them
+ * too making the assumption that this is safe, they are
+ * holding no locks. This can only be true if they don't
+ * have jobs scheduled which is hopefully the case.
+ */
+ if (cpu->state != cpu_state_os &&
+ cpu->state != cpu_state_active)
+ continue;
+
+ /*
+ * Only hit cores and only if they aren't on the same core
+ * as ourselves
+ */
+ if (cpu_get_thread0(cpu) == cpu_get_thread0(this_cpu()) ||
+ cpu->pir & 0x3)
+ continue;
+
+ /* Pick up one of those guys as our "resettor". It will be
+ * in charge of resetting this CPU. We avoid resetting
+ * ourselves, not sure how well it would do with SCOM
+ */
+ resettor = cpu;
+ break;
+ }
+
+ if (!resettor) {
+ printf("RESET: Can't find a resettor !\n");
+ return;
+ }
+ printf("RESET: Resetting from 0x%04x, resettor 0x%04x\n",
+ this_cpu()->pir, resettor->pir);
+
+ printf("RESET: Flushing caches...\n");
+
+ /* Is that necessary ? */
+ flush_caches();
+
+ /* Reset everybody except self and except resettor */
+ for_each_cpu(cpu) {
+ if (cpu->state != cpu_state_os &&
+ cpu->state != cpu_state_active)
+ continue;
+ if (cpu_get_thread0(cpu) == cpu_get_thread0(this_cpu()) ||
+ cpu->pir & 0x3)
+ continue;
+ if (cpu_get_thread0(cpu) == cpu_get_thread0(resettor))
+ continue;
+
+ printf("RESET: Resetting CPU 0x%04x...\n", cpu->pir);
+
+ if (!do_reset_core_p7(cpu))
+ return;
+ }
+
+ /* Reset the resettor last because it's going to kill me ! */
+ printf("RESET: Resetting CPU 0x%04x...\n", resettor->pir);
+ if (!do_reset_core_p7(resettor))
+ return;
+
+ /* Don't return */
+ for (;;)
+ ;
+}
+
+void fast_reset(void)
+{
+ uint32_t pvr = mfspr(SPR_PVR);
+ extern uint32_t fast_reset_patch_start;
+ extern uint32_t fast_reset_patch_end;
+ uint32_t *dst, *src;
+
+ printf("RESET: Fast reboot request !\n");
+
+ /* XXX We need a way to ensure that no other CPU is in skiboot
+ * holding locks (via the OPAL APIs) and if they are, we need
+ * for them to get out
+ */
+ reboot_in_progress = 1;
+ time_wait_ms(200);
+
+ /* Copy reset trampoline */
+ printf("RESET: Copying reset trampoline...\n");
+ src = &fast_reset_patch_start;
+ dst = (uint32_t *)0x100;
+ while(src < &fast_reset_patch_end)
+ *(dst++) = *(src++);
+ sync_icache();
+
+ switch(PVR_TYPE(pvr)) {
+ case PVR_TYPE_P7:
+ case PVR_TYPE_P7P:
+ fast_reset_p7();
+ }
+}
+
+static void cleanup_cpu_state(void)
+{
+ if (cpu_is_thread0(this_cpu())) {
+ cleanup_tlb();
+ init_shared_sprs();
+ }
+ init_replicated_sprs();
+ reset_cpu_icp();
+}
+
+#ifdef FAST_REBOOT_CLEARS_MEMORY
+static void fast_mem_clear(uint64_t start, uint64_t end)
+{
+ printf("MEMORY: Clearing %llx..%llx\n", start, end);
+
+ while(start < end) {
+ asm volatile("dcbz 0,%0" : : "r" (start) : "memory");
+ start += 128;
+ }
+}
+
+static void memory_reset(void)
+{
+ struct address_range *i;
+ uint64_t skistart = SKIBOOT_BASE;
+ uint64_t skiend = SKIBOOT_BASE + SKIBOOT_SIZE;
+
+ printf("MEMORY: Clearing ...\n");
+
+ list_for_each(&address_ranges, i, list) {
+ uint64_t start = cleanup_addr(i->arange->start);
+ uint64_t end = cleanup_addr(i->arange->end);
+
+ if (start >= skiend || end <= skistart)
+ fast_mem_clear(start, end);
+ else {
+ if (start < skistart)
+ fast_mem_clear(start, skistart);
+ if (end > skiend)
+ fast_mem_clear(skiend, end);
+ }
+ }
+}
+#endif /* FAST_REBOOT_CLEARS_MEMORY */
+
+/* Entry from asm after a fast reset */
+void fast_reboot(void);
+
+void fast_reboot(void)
+{
+ static volatile bool fast_boot_release;
+ struct cpu_thread *cpu;
+
+ printf("INIT: CPU PIR 0x%04x reset in\n", this_cpu()->pir);
+
+ /* If this CPU was chosen as the resettor, it must reset the
+	 * resettee (the one that initiated the whole process).
+ */
+ if (this_cpu() == resettor)
+ do_reset_core_p7(resettee);
+
+ /* Are we the original boot CPU ? If not, we spin waiting
+	 * for a release signal from the boot CPU, then we clean ourselves
+	 * up and start processing jobs.
+ */
+ if (this_cpu() != boot_cpu) {
+ this_cpu()->state = cpu_state_present;
+ while (!fast_boot_release) {
+ smt_very_low();
+ sync();
+ }
+ smt_medium();
+ cleanup_cpu_state();
+ __secondary_cpu_entry();
+ }
+
+ /* We are the original boot CPU, wait for secondaries to
+ * be captured
+ */
+ for_each_cpu(cpu) {
+ if (cpu == this_cpu())
+ continue;
+
+ /* XXX Add a callin timeout ? */
+ while (cpu->state != cpu_state_present) {
+ smt_very_low();
+ sync();
+ }
+ smt_medium();
+ }
+
+ printf("INIT: Releasing secondaries...\n");
+
+ /* Release everybody */
+ fast_boot_release = true;
+ sync();
+
+ /* Wait for them to respond */
+ for_each_cpu(cpu) {
+ if (cpu == this_cpu())
+ continue;
+
+ /* XXX Add a callin timeout ? */
+ while (cpu->state == cpu_state_present) {
+ smt_very_low();
+ sync();
+ }
+ }
+
+ printf("INIT: All done, resetting everything else...\n");
+
+ /* Clear release flag for next time */
+ fast_boot_release = false;
+ reboot_in_progress = 0;
+
+ /* Cleanup ourselves */
+ cleanup_cpu_state();
+
+ /* Set our state to active */
+ this_cpu()->state = cpu_state_active;
+
+ /* Poke the consoles (see comments in the code there) */
+ fsp_console_reset();
+
+ /* Reset/EOI the PSI interrupt */
+ psi_irq_reset();
+
+ /* Remove all PCI devices */
+ pci_reset();
+
+ /* Reset IO Hubs */
+ cec_reset();
+
+ /* Re-Initialize all discovered PCI slots */
+ pci_init_slots();
+
+ /* Clear memory */
+#ifdef FAST_REBOOT_CLEARS_MEMORY
+ memory_reset();
+#endif
+ load_and_boot_kernel(true);
+}
diff --git a/core/fdt.c b/core/fdt.c
new file mode 100644
index 0000000..62e60fc
--- /dev/null
+++ b/core/fdt.c
@@ -0,0 +1,208 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <stdarg.h>
+#include <libfdt.h>
+#include <device.h>
+#include <cpu.h>
+#include <memory.h>
+#include <opal.h>
+#include <interrupts.h>
+#include <fsp.h>
+#include <cec.h>
+#include <vpd.h>
+#include <ccan/str/str.h>
+
+static int fdt_error;
+static void *fdt;
+
+#undef DEBUG_FDT
+
+static void __save_err(int err, const char *str)
+{
+#ifdef DEBUG_FDT
+ printf("FDT: rc: %d from \"%s\"\n", err, str);
+#endif
+ if (err && !fdt_error) {
+ prerror("FDT: Error %d from \"%s\"\n", err, str);
+ fdt_error = err;
+ }
+}
+
+#define save_err(...) __save_err(__VA_ARGS__, #__VA_ARGS__)
+
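+/*
+ * The macro above stringifies its argument list, so for example
+ *
+ *	save_err(fdt_begin_node(fdt, name));
+ *
+ * expands to __save_err(fdt_begin_node(fdt, name),
+ * "fdt_begin_node(fdt, name)"), giving the error messages the exact
+ * failing call for free.
+ */
+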
+static void dt_property_cell(const char *name, u32 cell)
+{
+ save_err(fdt_property_cell(fdt, name, cell));
+}
+
+static void dt_begin_node(const char *name, uint32_t phandle)
+{
+ save_err(fdt_begin_node(fdt, name));
+
+ /*
+ * We add both the new style "phandle" and the legacy
+ * "linux,phandle" properties
+ */
+ dt_property_cell("linux,phandle", phandle);
+ dt_property_cell("phandle", phandle);
+}
+
+static void dt_property(const char *name, const void *val, size_t size)
+{
+ save_err(fdt_property(fdt, name, val, size));
+}
+
+static void dt_end_node(void)
+{
+ save_err(fdt_end_node(fdt));
+}
+
+static void dump_fdt(void)
+{
+#ifdef DEBUG_FDT
+ int i, off, depth, err;
+
+ printf("Device tree %u@%p\n", fdt_totalsize(fdt), fdt);
+
+ err = fdt_check_header(fdt);
+ if (err) {
+ prerror("fdt_check_header: %s\n", fdt_strerror(err));
+ return;
+ }
+ printf("fdt_check_header passed\n");
+
+ printf("fdt_num_mem_rsv = %u\n", fdt_num_mem_rsv(fdt));
+ for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
+ u64 addr, size;
+
+ err = fdt_get_mem_rsv(fdt, i, &addr, &size);
+ if (err) {
+ printf(" ERR %s\n", fdt_strerror(err));
+ return;
+ }
+		printf("  mem_rsv[%i] = %lu@%#lx\n", i, (long)size, (long)addr);
+ }
+
+ for (off = fdt_next_node(fdt, 0, &depth);
+ off > 0;
+ off = fdt_next_node(fdt, off, &depth)) {
+ int len;
+ const char *name;
+
+ name = fdt_get_name(fdt, off, &len);
+ if (!name) {
+ prerror("fdt: offset %i no name!\n", off);
+ return;
+ }
+ printf("name: %s [%u]\n", name, off);
+ }
+#endif
+}
+
+static void flatten_dt_node(const struct dt_node *root)
+{
+ const struct dt_node *i;
+ const struct dt_property *p;
+
+#ifdef DEBUG_FDT
+ printf("FDT: node: %s\n", root->name);
+#endif
+
+ list_for_each(&root->properties, p, list) {
+ if (strstarts(p->name, DT_PRIVATE))
+ continue;
+#ifdef DEBUG_FDT
+		printf("FDT: prop: %s size: %zu\n", p->name, p->len);
+#endif
+ dt_property(p->name, p->prop, p->len);
+ }
+
+ list_for_each(&root->children, i, list) {
+ dt_begin_node(i->name, i->phandle);
+ flatten_dt_node(i);
+ dt_end_node();
+ }
+}
+
+static void create_dtb_reservemap(const struct dt_node *root)
+{
+ uint64_t base, size;
+ const uint64_t *ranges;
+ const struct dt_property *prop;
+ int i;
+
+ /* Duplicate the reserved-ranges property into the fdt reservemap */
+ prop = dt_find_property(root, "reserved-ranges");
+ if (prop) {
+ ranges = (const void *)prop->prop;
+
+ for (i = 0; i < prop->len / (sizeof(uint64_t) * 2); i++) {
+ base = *(ranges++);
+ size = *(ranges++);
+ save_err(fdt_add_reservemap_entry(fdt, base, size));
+ }
+ }
+
+ save_err(fdt_finish_reservemap(fdt));
+}
+
+void *create_dtb(const struct dt_node *root)
+{
+ size_t len = DEVICE_TREE_MAX_SIZE;
+ uint32_t old_last_phandle = last_phandle;
+
+ do {
+ if (fdt)
+ free(fdt);
+ last_phandle = old_last_phandle;
+ fdt_error = 0;
+ fdt = malloc(len);
+ if (!fdt) {
+ prerror("dtb: could not malloc %lu\n", (long)len);
+ return NULL;
+ }
+
+ fdt_create(fdt, len);
+
+ create_dtb_reservemap(root);
+
+ /* Open root node */
+ dt_begin_node(root->name, root->phandle);
+
+		/* Flatten our live tree */
+ flatten_dt_node(root);
+
+ /* Close root node */
+ dt_end_node();
+
+ save_err(fdt_finish(fdt));
+
+ if (!fdt_error)
+ break;
+
+ len *= 2;
+ } while (fdt_error == -FDT_ERR_NOSPACE);
+
+ dump_fdt();
+
+ if (fdt_error) {
+ prerror("dtb: error %s\n", fdt_strerror(fdt_error));
+ return NULL;
+ }
+ return fdt;
+}
diff --git a/core/flash-nvram.c b/core/flash-nvram.c
new file mode 100644
index 0000000..7e261b1
--- /dev/null
+++ b/core/flash-nvram.c
@@ -0,0 +1,76 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <console.h>
+#include <opal.h>
+#include <platform.h>
+#include <libflash/libflash.h>
+
+static struct flash_chip *fl_nv_chip;
+static uint32_t fl_nv_start, fl_nv_size;
+
+static int flash_nvram_info(uint32_t *total_size)
+{
+ if (!fl_nv_chip)
+ return OPAL_HARDWARE;
+ *total_size = fl_nv_size;
+ return OPAL_SUCCESS;
+}
+
+static int flash_nvram_start_read(void *dst, uint32_t src, uint32_t len)
+{
+ int rc;
+
+ if ((src + len) > fl_nv_size) {
+		prerror("FLASH_NVRAM: read out of bounds (0x%x,0x%x)\n",
+ src, len);
+ return OPAL_PARAMETER;
+ }
+ rc = flash_read(fl_nv_chip, fl_nv_start + src, dst, len);
+ if (rc)
+ return rc;
+ nvram_read_complete(true);
+ return 0;
+}
+
+static int flash_nvram_write(uint32_t dst, void *src, uint32_t len)
+{
+ /* TODO: When we have async jobs for PRD, turn this into one */
+
+ if ((dst + len) > fl_nv_size) {
+		prerror("FLASH_NVRAM: write out of bounds (0x%x,0x%x)\n",
+ dst, len);
+ return OPAL_PARAMETER;
+ }
+ return flash_smart_write(fl_nv_chip, fl_nv_start + dst, src, len);
+}
+
+int flash_nvram_init(struct flash_chip *chip, uint32_t start, uint32_t size)
+{
+ fl_nv_chip = chip;
+ fl_nv_start = start;
+ fl_nv_size = size;
+
+ platform.nvram_info = flash_nvram_info;
+ platform.nvram_start_read = flash_nvram_start_read;
+ platform.nvram_write = flash_nvram_write;
+
+ return 0;
+}
+
diff --git a/core/hostservices.c b/core/hostservices.c
new file mode 100644
index 0000000..85e62e3
--- /dev/null
+++ b/core/hostservices.c
@@ -0,0 +1,826 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <lock.h>
+#include <device.h>
+#include <compiler.h>
+#include <hostservices.h>
+#include <mem_region.h>
+#include <xscom.h>
+#include <fsp.h>
+#include <chip.h>
+#include <console.h>
+#include <mem-map.h>
+#include <timebase.h>
+
+#define HOSTBOOT_RUNTIME_INTERFACE_VERSION 1
+
+struct host_interfaces {
+ /** Interface version. */
+ uint64_t interface_version;
+
+ /** Put a string to the console. */
+ void (*puts)(const char*);
+ /** Critical failure in runtime execution. */
+ void (*assert)(void);
+
+ /** OPTIONAL. Hint to environment that the page may be executed. */
+ int (*set_page_execute)(void*);
+
+ /** malloc */
+ void *(*malloc)(size_t);
+ /** free */
+ void (*free)(void*);
+ /** realloc */
+ void *(*realloc)(void*, size_t);
+
+ /** sendErrorLog
+ * @param[in] plid Platform Log identifier
+ * @param[in] data size in bytes
+ * @param[in] pointer to data
+ * @return 0 on success else error code
+ */
+ int (*send_error_log)(uint32_t,uint32_t,void *);
+
+ /** Scan communication read
+ * @param[in] chip_id (based on devtree defn)
+ * @param[in] address
+ * @param[in] pointer to 8-byte data buffer
+ * @return 0 on success else return code
+ */
+ int (*scom_read)(uint64_t, uint64_t, void*);
+
+ /** Scan communication write
+ * @param[in] chip_id (based on devtree defn)
+ * @param[in] address
+ * @param[in] pointer to 8-byte data buffer
+ * @return 0 on success else return code
+ */
+ int (*scom_write)(uint64_t, uint64_t, const void *);
+
+ /** lid_load
+ * Load a LID from PNOR, FSP, etc.
+ *
+ * @param[in] LID number.
+ * @param[out] Allocated buffer for LID.
+ * @param[out] Size of LID (in bytes).
+ *
+ * @return 0 on success, else RC.
+ */
+ int (*lid_load)(uint32_t lid, void **buf, size_t *len);
+
+ /** lid_unload
+ * Release memory from previously loaded LID.
+ *
+ * @param[in] Allocated buffer for LID to release.
+ *
+ * @return 0 on success, else RC.
+ */
+ int (*lid_unload)(void *buf);
+
+ /** Get the address of a reserved memory region by its devtree name.
+ *
+ * @param[in] Devtree name (ex. "ibm,hbrt-vpd-image")
+ * @return physical address of region (or NULL).
+ **/
+ uint64_t (*get_reserved_mem)(const char*);
+
+ /**
+ * @brief Force a core to be awake, or clear the force
+ * @param[in] i_core Core to wake up (pid)
+ * @param[in] i_mode 0=force awake
+ * 1=clear force
+ * 2=clear all previous forces
+ * @return rc non-zero on error
+ */
+	int (*wakeup)(uint32_t i_core, uint32_t i_mode);
+
+ /**
+ * @brief Delay/sleep for at least the time given
+ * @param[in] seconds
+	 * @param[in] nanoseconds
+ */
+ void (*nanosleep)(uint64_t i_seconds, uint64_t i_nano_seconds);
+
+ // Reserve some space for future growth.
+ void (*reserved[32])(void);
+};
+
+struct runtime_interfaces {
+ /** Interface version. */
+ uint64_t interface_version;
+
+ /** Execute CxxTests that may be contained in the image.
+ *
+ * @param[in] - Pointer to CxxTestStats structure for results reporting.
+ */
+ void (*cxxtestExecute)(void *);
+	/** Get the list of LID numbers known to HostBoot
+ *
+ * @param[out] o_num - the number of lids in the list
+ * @return a pointer to the list
+ */
+ const uint32_t * (*get_lid_list)(size_t * o_num);
+
+	/** Load the OCC image and common data into mainstore, and set up the OCC BARs
+ *
+ * @param[in] i_homer_addr_phys - The physical mainstore address of the
+ * start of the HOMER image
+ * @param[in] i_homer_addr_va - Virtual memory address of the HOMER image
+ * @param[in] i_common_addr_phys - The physical mainstore address of the
+ * OCC common area.
+ * @param[in] i_common_addr_va - Virtual memory address of the common area
+ * @param[in] i_chip - The HW chip id (XSCOM chip ID)
+ * @return 0 on success else return code
+ */
+ int(*loadOCC)(uint64_t i_homer_addr_phys,
+ uint64_t i_homer_addr_va,
+ uint64_t i_common_addr_phys,
+ uint64_t i_common_addr_va,
+ uint64_t i_chip);
+
+ /** Start OCC on all chips, by module
+ *
+ * @param[in] i_chip - Array of functional HW chip ids
+	 * @Note The caller must include a complete module's worth of chips
+ * @param[in] i_num_chips - Number of chips in the array
+ * @return 0 on success else return code
+ */
+ int (*startOCCs)(uint64_t* i_chip,
+ size_t i_num_chips);
+
+	/** Stop the OCCs and hold them in reset
+ *
+ * @param[in] i_chip - Array of functional HW chip ids
+	 * @Note The caller must include a complete module's worth of chips
+ * @param[in] i_num_chips - Number of chips in the array
+ * @return 0 on success else return code
+ */
+ int (*stopOCCs)(uint64_t* i_chip,
+ size_t i_num_chips);
+
+ /* Reserve some space for future growth. */
+ void (*reserved[32])(void);
+};
+
+static struct runtime_interfaces *hservice_runtime;
+
+static char *hbrt_con_buf = (char *)HBRT_CON_START;
+static size_t hbrt_con_pos;
+static bool hbrt_con_wrapped;
+
+#define HBRT_CON_IN_LEN 0
+#define HBRT_CON_OUT_LEN (HBRT_CON_LEN - HBRT_CON_IN_LEN)
+
+struct memcons hbrt_memcons __section(".data.memcons") = {
+ .magic = MEMCONS_MAGIC,
+ .obuf_phys = HBRT_CON_START,
+ .ibuf_phys = HBRT_CON_START + HBRT_CON_OUT_LEN,
+ .obuf_size = HBRT_CON_OUT_LEN,
+ .ibuf_size = HBRT_CON_IN_LEN,
+};
+
+static void hservice_putc(char c)
+{
+ uint32_t opos;
+
+ hbrt_con_buf[hbrt_con_pos++] = c;
+ if (hbrt_con_pos >= HBRT_CON_OUT_LEN) {
+ hbrt_con_pos = 0;
+ hbrt_con_wrapped = true;
+ }
+
+ /*
+ * We must always re-generate memcons.out_pos because
+ * under some circumstances, the console script will
+ * use a broken putmemproc that does RMW on the full
+ * 8 bytes containing out_pos and in_prod, thus corrupting
+ * out_pos
+ */
+ opos = hbrt_con_pos;
+ if (hbrt_con_wrapped)
+ opos |= MEMCONS_OUT_POS_WRAP;
+ lwsync();
+ hbrt_memcons.out_pos = opos;
+}
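+
+/*
+ * Sketch of how a consumer is expected to decode out_pos: with
+ * MEMCONS_OUT_POS_WRAP set, the valid output is
+ * obuf[pos..obuf_size-1] followed by obuf[0..pos-1]; without it,
+ * just obuf[0..pos-1] (pos being out_pos with the wrap bit masked
+ * off).
+ */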
+
+static void hservice_puts(const char *str)
+{
+ char c;
+
+	while ((c = *(str++)) != 0)
+		hservice_putc(c);
+	hservice_putc('\n');
+}
+
+static void hservice_mark(void)
+{
+ hservice_puts("--------------------------------------------------"
+ "--------------------------------------------------\n");
+}
+
+static void hservice_assert(void)
+{
+ prerror("HBRT: Assertion from hostservices\n");
+ abort();
+}
+
+static void *hservice_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+static void hservice_free(void *ptr)
+{
+ free(ptr);
+}
+
+
+static void *hservice_realloc(void *ptr, size_t size)
+{
+ return realloc(ptr, size);
+}
+
+struct hbrt_elog_ent {
+ void *buf;
+ unsigned int size;
+ unsigned int plid;
+ struct list_node link;
+};
+static LIST_HEAD(hbrt_elogs);
+static struct lock hbrt_elog_lock = LOCK_UNLOCKED;
+static bool hbrt_elog_sending;
+static void hservice_start_elog_send(void);
+
+static void hservice_elog_write_complete(struct fsp_msg *msg)
+{
+ struct hbrt_elog_ent *ent = msg->user_data;
+
+ lock(&hbrt_elog_lock);
+ printf("HBRT: Completed send of PLID 0x%08x\n", ent->plid);
+ hbrt_elog_sending = false;
+ fsp_tce_unmap(PSI_DMA_HBRT_LOG_WRITE_BUF,
+ PSI_DMA_HBRT_LOG_WRITE_BUF_SZ);
+ free(ent->buf);
+ free(ent);
+ fsp_freemsg(msg);
+ hservice_start_elog_send();
+ unlock(&hbrt_elog_lock);
+}
+
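+/* Must be called with hbrt_elog_lock held */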
+static void hservice_start_elog_send(void)
+{
+ struct fsp_msg *msg;
+ struct hbrt_elog_ent *ent;
+
+ again:
+ if (list_empty(&hbrt_elogs))
+ return;
+ ent = list_pop(&hbrt_elogs, struct hbrt_elog_ent, link);
+
+ hbrt_elog_sending = true;
+
+ printf("HBRT: Starting send of PLID 0x%08x\n", ent->plid);
+
+ fsp_tce_map(PSI_DMA_HBRT_LOG_WRITE_BUF, ent->buf,
+ PSI_DMA_HBRT_LOG_WRITE_BUF_SZ);
+
+ msg = fsp_mkmsg(FSP_CMD_WRITE_SP_DATA, 6, FSP_DATASET_HBRT_BLOB,
+ 0, 0, 0, PSI_DMA_HBRT_LOG_WRITE_BUF,
+ ent->size);
+
+ if (!msg) {
+ prerror("HBRT: Failed to create error msg log to FSP\n");
+ goto error;
+ }
+ msg->user_data = ent;
+ if (!fsp_queue_msg(msg, hservice_elog_write_complete))
+ return;
+ prerror("FSP: Error queueing elog update\n");
+ error:
+ if (msg)
+ fsp_freemsg(msg);
+ fsp_tce_unmap(PSI_DMA_HBRT_LOG_WRITE_BUF,
+ PSI_DMA_HBRT_LOG_WRITE_BUF_SZ);
+ free(ent->buf);
+ free(ent);
+ hbrt_elog_sending = false;
+ goto again;
+}
+
+static int hservice_send_error_log(uint32_t plid, uint32_t dsize, void *data)
+{
+ struct hbrt_elog_ent *ent;
+ void *abuf;
+
+ printf("HBRT: Error log generated with plid 0x%08x\n", plid);
+
+ /* We only know how to send error logs to FSP */
+ if (!fsp_present()) {
+ prerror("HBRT: Warning, error log from HBRT discarded !\n");
+ return OPAL_UNSUPPORTED;
+ }
+ if (dsize > PSI_DMA_HBRT_LOG_WRITE_BUF_SZ) {
+ prerror("HBRT: Warning, error log from HBRT too big (%d) !\n",
+ dsize);
+ dsize = PSI_DMA_HBRT_LOG_WRITE_BUF_SZ;
+ }
+
+ lock(&hbrt_elog_lock);
+
+ /* Create and populate a tracking structure */
+ ent = zalloc(sizeof(struct hbrt_elog_ent));
+ if (!ent) {
+ unlock(&hbrt_elog_lock);
+ return OPAL_NO_MEM;
+ }
+
+ /* Grab a 4k aligned page */
+ abuf = memalign(0x1000, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ);
+ if (!abuf) {
+ free(ent);
+ unlock(&hbrt_elog_lock);
+ return OPAL_NO_MEM;
+ }
+ memset(abuf, 0, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ);
+ memcpy(abuf, data, dsize);
+ ent->buf = abuf;
+ ent->size = dsize;
+ ent->plid = plid;
+ list_add_tail(&hbrt_elogs, &ent->link);
+ if (!hbrt_elog_sending)
+ hservice_start_elog_send();
+ unlock(&hbrt_elog_lock);
+
+ return 0;
+}
+
+static int hservice_scom_read(uint64_t chip_id, uint64_t addr, void *buf)
+{
+ return xscom_read(chip_id, addr, buf);
+}
+
+static int hservice_scom_write(uint64_t chip_id, uint64_t addr,
+ const void *buf)
+{
+ uint64_t val;
+
+ memcpy(&val, buf, sizeof(val));
+ return xscom_write(chip_id, addr, val);
+}
+
+static int hservice_lid_load(uint32_t lid, void **buf, size_t *len)
+{
+ int rc;
+ static void *lid_cache;
+ static size_t lid_cache_len;
+ static uint32_t lid_cache_id;
+
+ printf("HBRT: LID load request for 0x%08x\n", lid);
+
+ /* Adjust LID side first or we get a cache mismatch */
+ lid = fsp_adjust_lid_side(lid);
+
+ /* Check for cache */
+ if (lid_cache && lid_cache_id == lid) {
+ *buf = lid_cache;
+ *len = lid_cache_len;
+ printf("HBRT: Serviced from cache, len=0x%lx\n", lid_cache_len);
+ return 0;
+ }
+
+ /* Cache mismatch, discard old one */
+ if (lid_cache) {
+ printf("HBRT: Cache mismatch, discarding old 0x%08x\n",
+ lid_cache_id);
+ free(lid_cache);
+ lid_cache = NULL;
+ }
+
+ /* Allocate a new buffer and load the LID into it */
+ *buf = malloc(HBRT_LOAD_LID_SIZE);
+ *len = HBRT_LOAD_LID_SIZE;
+ rc = fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid, 0, *buf, len);
+ if (rc != 0)
+ /* Take advantage of realloc corner case here. */
+ *len = 0;
+ *buf = realloc(*buf, *len);
+
+ /* We managed, let's cache it */
+ if (rc == 0 && *len) {
+ lid_cache = *buf;
+ lid_cache_len = *len;
+ lid_cache_id = lid;
+
+ printf("HBRT: LID 0x%08x successfully loaded and cached"
+ ", len=0x%lx\n", lid, lid_cache_len);
+ }
+
+ return rc;
+}
+
+static int hservice_lid_unload(void *buf __unused)
+{
+ /* We do nothing as the LID is held in cache */
+ return 0;
+}
+
+static uint64_t hservice_get_reserved_mem(const char *name)
+{
+ struct mem_region *region;
+ uint64_t ret;
+
+ /* We assume it doesn't change after we've unlocked it, but
+ * lock ensures list is safe to walk. */
+ lock(&mem_region_lock);
+ region = find_mem_region(name);
+ ret = region ? region->start : 0;
+ unlock(&mem_region_lock);
+
+ if (!ret)
+ prerror("HBRT: Mem region '%s' not found !\n", name);
+
+ return ret;
+}
+
+static void hservice_nanosleep(uint64_t i_seconds, uint64_t i_nano_seconds)
+{
+ struct timespec ts;
+
+ ts.tv_sec = i_seconds;
+ ts.tv_nsec = i_nano_seconds;
+ nanosleep(&ts, NULL);
+}
+
+static int hservice_set_special_wakeup(struct cpu_thread *cpu)
+{
+ uint64_t val, core_id, poll_target, stamp;
+ int rc;
+
+ /*
+ * Note: HWP checks for checkstops, but I assume we don't need to
+ * as we wouldn't be running if one was present
+ */
+
+ /* Grab core ID once */
+ core_id = pir_to_core_id(cpu->pir);
+
+ /*
+	 * The original HWP reads the XSCOM first but ignores the result
+	 * and error; let's do the same until I know for sure that is
+	 * not necessary
+ */
+ xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
+ &val);
+
+ /* Then we write special wakeup */
+ rc = xscom_write(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id,
+ EX_PM_SPECIAL_WAKEUP_PHYP),
+ PPC_BIT(0));
+ if (rc) {
+ prerror("HBRT: XSCOM error %d asserting special"
+ " wakeup on 0x%x\n", rc, cpu->pir);
+ return rc;
+ }
+
+ /*
+ * HWP uses the history for Perf register here, dunno why it uses
+ * that one instead of the pHyp one, maybe to avoid clobbering it...
+ *
+	 * In any case, it does that to check for run/nap vs. sleep/winkle/other
+ * to decide whether to poll on checkstop or not. Since we don't deal
+ * with checkstop conditions here, we ignore that part.
+ */
+
+ /*
+ * Now poll for completion of special wakeup. The HWP is nasty here,
+ * it will poll at 5ms intervals for up to 200ms. This is not quite
+ * acceptable for us at runtime, at least not until we have the
+ * ability to "context switch" HBRT. In practice, because we don't
+ * winkle, it will never take that long, so we increase the polling
+ * frequency to 1us per poll. However we do have to keep the same
+ * timeout.
+ *
+ * We don't use time_wait_ms() either for now as we don't want to
+ * poll the FSP here.
+ */
+ stamp = mftb();
+ poll_target = stamp + msecs_to_tb(200);
+ val = 0;
+ while (!(val & EX_PM_GP0_SPECIAL_WAKEUP_DONE)) {
+ /* Wait 1 us */
+ hservice_nanosleep(0, 1000);
+
+ /* Read PM state */
+ rc = xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_GP0),
+ &val);
+ if (rc) {
+ prerror("HBRT: XSCOM error %d reading PM state on"
+ " 0x%x\n", rc, cpu->pir);
+ return rc;
+ }
+ /* Check timeout */
+ if (mftb() > poll_target)
+ break;
+ }
+
+ /* Success ? */
+ if (val & EX_PM_GP0_SPECIAL_WAKEUP_DONE) {
+ uint64_t now = mftb();
+ printf("HBRT: Special wakeup complete after %ld us\n",
+ tb_to_usecs(now - stamp));
+ return 0;
+ }
+
+ /*
+ * We timed out ...
+ *
+ * HWP has a complex workaround for HW255321 which affects
+ * Murano DD1 and Venice DD1. Ignore that for now
+ *
+ * Instead we just dump some XSCOMs for error logging
+ */
+ prerror("HBRT: Timeout on special wakeup of 0x%0x\n", cpu->pir);
+ prerror("HBRT: PM0 = 0x%016llx\n", val);
+ val = -1;
+ xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
+ &val);
+ prerror("HBRT: SPC_WKUP = 0x%016llx\n", val);
+ val = -1;
+ xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id,
+ EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &val);
+ prerror("HBRT: HISTORY = 0x%016llx\n", val);
+
+ return OPAL_HARDWARE;
+}
+
+static int hservice_clr_special_wakeup(struct cpu_thread *cpu)
+{
+ uint64_t val, core_id;
+ int rc;
+
+ /*
+ * Note: HWP checks for checkstops, but I assume we don't need to
+ * as we wouldn't be running if one was present
+ */
+
+ /* Grab core ID once */
+ core_id = pir_to_core_id(cpu->pir);
+
+ /*
+	 * The original HWP reads the XSCOM first but ignores the result
+	 * and error; let's do the same until I know for sure that is
+	 * not necessary
+ */
+ xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
+ &val);
+
+ /* Then we write special wakeup */
+ rc = xscom_write(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id,
+ EX_PM_SPECIAL_WAKEUP_PHYP), 0);
+ if (rc) {
+ prerror("HBRT: XSCOM error %d deasserting"
+ " special wakeup on 0x%x\n", rc, cpu->pir);
+ return rc;
+ }
+
+ /*
+	 * The original HWP reads the XSCOM again with the comment
+ * "This puts an inherent delay in the propagation of the reset
+ * transition"
+ */
+ xscom_read(cpu->chip_id,
+ XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP),
+ &val);
+
+ return 0;
+}
+
+static int hservice_wakeup(uint32_t i_core, uint32_t i_mode)
+{
+ struct cpu_thread *cpu;
+ int rc = OPAL_SUCCESS;
+
+ /*
+ * Mask out the top nibble of i_core since it may contain
+	 * 0x4 (which we use for XSCOM targeting)
+ */
+ i_core &= 0x0fffffff;
+
+ /* What do we need to do ? */
+ switch(i_mode) {
+ case 0: /* Assert special wakeup */
+ /* XXX Assume P8 */
+ cpu = find_cpu_by_pir(i_core << 3);
+ if (!cpu)
+ return OPAL_PARAMETER;
+ printf("HBRT: Special wakeup assert for core 0x%x, count=%d\n",
+ i_core, cpu->hbrt_spec_wakeup);
+ if (cpu->hbrt_spec_wakeup == 0)
+ rc = hservice_set_special_wakeup(cpu);
+ if (rc == 0)
+ cpu->hbrt_spec_wakeup++;
+ return rc;
+ case 1: /* Deassert special wakeup */
+ /* XXX Assume P8 */
+ cpu = find_cpu_by_pir(i_core << 3);
+ if (!cpu)
+ return OPAL_PARAMETER;
+ printf("HBRT: Special wakeup release for core 0x%x, count=%d\n",
+ i_core, cpu->hbrt_spec_wakeup);
+ if (cpu->hbrt_spec_wakeup == 0) {
+ prerror("HBRT: Special wakeup clear"
+ " on core 0x%x with count=0\n",
+ i_core);
+ return OPAL_WRONG_STATE;
+ }
+ /* What to do with count on errors ? */
+ cpu->hbrt_spec_wakeup--;
+ if (cpu->hbrt_spec_wakeup == 0)
+ rc = hservice_clr_special_wakeup(cpu);
+ return rc;
+ case 2: /* Clear all special wakeups */
+ printf("HBRT: Special wakeup release for all cores\n");
+ for_each_cpu(cpu) {
+ if (cpu->hbrt_spec_wakeup) {
+ cpu->hbrt_spec_wakeup = 0;
+ /* What to do on errors ? */
+ hservice_clr_special_wakeup(cpu);
+ }
+ }
+ return OPAL_SUCCESS;
+ default:
+ return OPAL_PARAMETER;
+ }
+}
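+
+/*
+ * Illustrative call sequence for the refcounting above (mode values
+ * as documented in struct host_interfaces):
+ *
+ *	hservice_wakeup(core, 0);  count 0 -> 1, asserts special wakeup
+ *	hservice_wakeup(core, 0);  count 1 -> 2, no XSCOM access
+ *	hservice_wakeup(core, 1);  count 2 -> 1, no XSCOM access
+ *	hservice_wakeup(core, 1);  count 1 -> 0, clears special wakeup
+ */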
+
+static struct host_interfaces hinterface = {
+ .interface_version = HOSTBOOT_RUNTIME_INTERFACE_VERSION,
+ .puts = hservice_puts,
+ .assert = hservice_assert,
+ .malloc = hservice_malloc,
+ .free = hservice_free,
+ .realloc = hservice_realloc,
+ .send_error_log = hservice_send_error_log,
+ .scom_read = hservice_scom_read,
+ .scom_write = hservice_scom_write,
+ .lid_load = hservice_lid_load,
+ .lid_unload = hservice_lid_unload,
+ .get_reserved_mem = hservice_get_reserved_mem,
+ .wakeup = hservice_wakeup,
+ .nanosleep = hservice_nanosleep,
+};
+
+int host_services_occ_load(void)
+{
+ struct proc_chip *chip;
+ int rc = 0;
+
+ printf("HBRT: OCC Load requested\n");
+
+ if (!(hservice_runtime && hservice_runtime->loadOCC)) {
+ prerror("HBRT: No hservice_runtime->loadOCC\n");
+ return -ENOENT;
+ }
+
+ for_each_chip(chip) {
+
+ printf("HBRT: [%16lx] Calling loadOCC() homer %016llx, occ_common_area %016llx, "
+ "chip %04x\n",
+ mftb(),
+ chip->homer_base,
+ chip->occ_common_base,
+ chip->id);
+
+ rc = hservice_runtime->loadOCC(chip->homer_base,
+ chip->homer_base,
+ chip->occ_common_base,
+ chip->occ_common_base,
+ chip->id);
+
+ hservice_mark();
+ printf("HBRT: [%16lx] -> rc = %d\n", mftb(), rc);
+ }
+ return rc;
+}
+
+int host_services_occ_start(void)
+{
+ struct proc_chip *chip;
+	int i, rc = 0, nr_chips = 0;
+ uint64_t chipids[MAX_CHIPS];
+
+ printf("HBRT: OCC Start requested\n");
+
+ if (!(hservice_runtime && hservice_runtime->startOCCs)) {
+ prerror("HBRT: No hservice_runtime->startOCCs\n");
+ return -ENOENT;
+ }
+
+ for_each_chip(chip) {
+ chipids[nr_chips++] = chip->id;
+ }
+
+ printf("HBRT: [%16lx] Calling startOCC() for IDs: ", mftb());
+ for (i = 0; i < nr_chips; i++)
+ printf("%04llx ", chipids[i]);
+ printf("\n");
+
+	/* Let's start all the OCCs */
+ rc = hservice_runtime->startOCCs(chipids, nr_chips);
+ hservice_mark();
+ printf("HBRT: [%16lx] -> rc = %d\n", mftb(), rc);
+ return rc;
+}
+
+void host_services_occ_base_setup(void)
+{
+ struct proc_chip *chip;
+ uint64_t occ_common;
+
+	chip = next_chip(NULL); /* First chip */
+ occ_common = (uint64_t) local_alloc(chip->id, OCC_COMMON_SIZE, OCC_COMMON_SIZE);
+
+ for_each_chip(chip) {
+ chip->occ_common_base = occ_common;
+ chip->occ_common_size = OCC_COMMON_SIZE;
+
+ chip->homer_base = (uint64_t) local_alloc(chip->id, HOMER_IMAGE_SIZE,
+ HOMER_IMAGE_SIZE);
+ chip->homer_size = HOMER_IMAGE_SIZE;
+ memset((void *)chip->homer_base, 0, chip->homer_size);
+
+ printf("HBRT: Chip %d HOMER base %016llx : %08llx "
+ "OCC common base %016llx : %08llx\n",
+ chip->id, chip->homer_base, chip->homer_size,
+ chip->occ_common_base, chip->occ_common_size);
+ }
+}
+
+bool hservices_init(void)
+{
+ void *code = NULL;
+ struct runtime_interfaces *(*hbrt_init)(struct host_interfaces *);
+
+ struct function_descriptor {
+ void *addr;
+ void *toc;
+ } fdesc;
+
+ code = (void *)hservice_get_reserved_mem("ibm,hbrt-code-image");
+ if (!code) {
+ prerror("HBRT: No ibm,hbrt-code-image found.\n");
+ return false;
+ }
+
+ if (memcmp(code, "HBRTVERS", 8) != 0) {
+ prerror("HBRT: Bad eyecatcher for ibm,hbrt-code-image!\n");
+ return false;
+ }
+
+ printf("HBRT: Found HostBoot Runtime version %llu\n", ((u64 *)code)[1]);
+
+ /* We enter at 0x100 into the image. */
+ fdesc.addr = code + 0x100;
+ /* It doesn't care about TOC */
+ fdesc.toc = 0;
+
+ hbrt_init = (void *)&fdesc;
+
+ hservice_runtime = hbrt_init(&hinterface);
+ hservice_mark();
+ if (!hservice_runtime) {
+ prerror("HBRT: Host services init failed\n");
+ return false;
+ }
+
+ printf("HBRT: Interface version %llu\n",
+ hservice_runtime->interface_version);
+
+ return true;
+}
diff --git a/core/init.c b/core/init.c
new file mode 100644
index 0000000..3d72ce5
--- /dev/null
+++ b/core/init.c
@@ -0,0 +1,687 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <fsp-sysparam.h>
+#include <psi.h>
+#include <memory.h>
+#include <chiptod.h>
+#include <nx.h>
+#include <cpu.h>
+#include <processor.h>
+#include <xscom.h>
+#include <device_tree.h>
+#include <opal.h>
+#include <opal-msg.h>
+#include <elf.h>
+#include <io.h>
+#include <cec.h>
+#include <device.h>
+#include <pci.h>
+#include <lpc.h>
+#include <chip.h>
+#include <interrupts.h>
+#include <mem_region.h>
+#include <trace.h>
+#include <console.h>
+#include <fsi-master.h>
+#include <centaur.h>
+#include <libfdt/libfdt.h>
+#include <hostservices.h>
+
+/*
+ * Boot state, forced into the data section as it is used before
+ * BSS is initialized
+ */
+enum ipl_state ipl_state = ipl_initial;
+enum proc_gen proc_gen;
+
+static uint64_t kernel_entry;
+static bool kernel_32bit;
+static void *fdt;
+
+struct debug_descriptor debug_descriptor = {
+ .eye_catcher = "OPALdbug",
+ .version = DEBUG_DESC_VERSION,
+ .memcons_phys = (uint64_t)&memcons,
+ .trace_mask = 0, /* All traces disabled by default */
+};
+
+static bool try_load_elf64_le(struct elf_hdr *header)
+{
+ struct elf64_hdr *kh = (struct elf64_hdr *)header;
+ uint64_t load_base = (uint64_t)kh;
+ struct elf64_phdr *ph;
+ unsigned int i;
+
+ printf("INIT: 64-bit LE kernel discovered\n");
+
+ /* Look for a loadable program header that has our entry in it
+ *
+	 * Note that we execute the kernel in-place; we don't actually
+	 * obey the load information in the headers. This is expected
+	 * to work for the Linux kernel because it's a fairly dumb ELF,
+	 * but it will not work for arbitrary ELF binaries.
+ */
+ ph = (struct elf64_phdr *)(load_base + le64_to_cpu(kh->e_phoff));
+ for (i = 0; i < le16_to_cpu(kh->e_phnum); i++, ph++) {
+ if (le32_to_cpu(ph->p_type) != ELF_PTYPE_LOAD)
+ continue;
+ if (le64_to_cpu(ph->p_vaddr) > le64_to_cpu(kh->e_entry) ||
+ (le64_to_cpu(ph->p_vaddr) + le64_to_cpu(ph->p_memsz)) <
+ le64_to_cpu(kh->e_entry))
+ continue;
+
+ /* Get our entry */
+ kernel_entry = le64_to_cpu(kh->e_entry) -
+ le64_to_cpu(ph->p_vaddr) + le64_to_cpu(ph->p_offset);
+ break;
+ }
+
+ if (!kernel_entry) {
+ prerror("INIT: Failed to find kernel entry !\n");
+ return false;
+ }
+ kernel_entry += load_base;
+ kernel_32bit = false;
+
+ printf("INIT: 64-bit kernel entry at 0x%llx\n", kernel_entry);
+
+ return true;
+}
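+
+/*
+ * Worked example of the entry fixup above: with e_entry = 0x1000,
+ * p_vaddr = 0 and p_offset = 0, kernel_entry ends up as
+ * load_base + 0x1000, i.e. we jump into the image where it already
+ * sits in memory instead of relocating it to p_vaddr.
+ */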
+
+static bool try_load_elf64(struct elf_hdr *header)
+{
+ struct elf64_hdr *kh = (struct elf64_hdr *)header;
+ uint64_t load_base = (uint64_t)kh;
+ struct elf64_phdr *ph;
+ unsigned int i;
+
+ /* Check it's a ppc64 LE ELF */
+ if (kh->ei_ident == ELF_IDENT &&
+ kh->ei_data == ELF_DATA_LSB &&
+ kh->e_machine == le16_to_cpu(ELF_MACH_PPC64)) {
+ return try_load_elf64_le(header);
+ }
+
+ /* Check it's a ppc64 ELF */
+ if (kh->ei_ident != ELF_IDENT ||
+ kh->ei_data != ELF_DATA_MSB ||
+ kh->e_machine != ELF_MACH_PPC64) {
+ prerror("INIT: Kernel doesn't look like an ppc64 ELF\n");
+ return false;
+ }
+
+ /* Look for a loadable program header that has our entry in it
+ *
+	 * Note that we execute the kernel in-place; we don't actually
+	 * obey the load information in the headers. This is expected
+	 * to work for the Linux kernel because it's a fairly dumb ELF,
+	 * but it will not work for arbitrary ELF binaries.
+ */
+ ph = (struct elf64_phdr *)(load_base + kh->e_phoff);
+ for (i = 0; i < kh->e_phnum; i++, ph++) {
+ if (ph->p_type != ELF_PTYPE_LOAD)
+ continue;
+ if (ph->p_vaddr > kh->e_entry ||
+ (ph->p_vaddr + ph->p_memsz) < kh->e_entry)
+ continue;
+
+ /* Get our entry */
+ kernel_entry = kh->e_entry - ph->p_vaddr + ph->p_offset;
+ break;
+ }
+
+ if (!kernel_entry) {
+ prerror("INIT: Failed to find kernel entry !\n");
+ return false;
+ }
+ kernel_entry += load_base;
+ kernel_32bit = false;
+
+ printf("INIT: 64-bit kernel entry at 0x%llx\n", kernel_entry);
+
+ return true;
+}
+
+static bool try_load_elf32_le(struct elf_hdr *header)
+{
+ struct elf32_hdr *kh = (struct elf32_hdr *)header;
+ uint64_t load_base = (uint64_t)kh;
+ struct elf32_phdr *ph;
+ unsigned int i;
+
+ printf("INIT: 32-bit LE kernel discovered\n");
+
+ /* Look for a loadable program header that has our entry in it
+ *
+	 * Note that we execute the kernel in-place; we don't actually
+	 * obey the load information in the headers. This is expected
+	 * to work for the Linux kernel because it's a fairly dumb ELF,
+	 * but it will not work for arbitrary ELF binaries.
+ */
+ ph = (struct elf32_phdr *)(load_base + le32_to_cpu(kh->e_phoff));
+ for (i = 0; i < le16_to_cpu(kh->e_phnum); i++, ph++) {
+ if (le32_to_cpu(ph->p_type) != ELF_PTYPE_LOAD)
+ continue;
+ if (le32_to_cpu(ph->p_vaddr) > le32_to_cpu(kh->e_entry) ||
+ (le32_to_cpu(ph->p_vaddr) + le32_to_cpu(ph->p_memsz)) <
+ le32_to_cpu(kh->e_entry))
+ continue;
+
+ /* Get our entry */
+ kernel_entry = le32_to_cpu(kh->e_entry) -
+ le32_to_cpu(ph->p_vaddr) + le32_to_cpu(ph->p_offset);
+ break;
+ }
+
+ if (!kernel_entry) {
+ prerror("INIT: Failed to find kernel entry !\n");
+ return false;
+ }
+
+ kernel_entry += load_base;
+ kernel_32bit = true;
+
+ printf("INIT: 32-bit kernel entry at 0x%llx\n", kernel_entry);
+
+ return true;
+}
+
+static bool try_load_elf32(struct elf_hdr *header)
+{
+ struct elf32_hdr *kh = (struct elf32_hdr *)header;
+ uint64_t load_base = (uint64_t)kh;
+ struct elf32_phdr *ph;
+ unsigned int i;
+
+ /* Check it's a ppc32 LE ELF */
+ if (header->ei_ident == ELF_IDENT &&
+ header->ei_data == ELF_DATA_LSB &&
+ header->e_machine == le16_to_cpu(ELF_MACH_PPC32)) {
+ return try_load_elf32_le(header);
+ }
+
+ /* Check it's a ppc32 ELF */
+ if (header->ei_ident != ELF_IDENT ||
+ header->ei_data != ELF_DATA_MSB ||
+ header->e_machine != ELF_MACH_PPC32) {
+ prerror("INIT: Kernel doesn't look like an ppc32 ELF\n");
+ return false;
+ }
+
+ /* Look for a loadable program header that has our entry in it
+ *
+	 * Note that we execute the kernel in-place; we don't actually
+	 * obey the load information in the headers. This is expected
+	 * to work for the Linux kernel because it's a fairly dumb ELF,
+	 * but it will not work for arbitrary ELF binaries.
+ */
+ ph = (struct elf32_phdr *)(load_base + kh->e_phoff);
+ for (i = 0; i < kh->e_phnum; i++, ph++) {
+ if (ph->p_type != ELF_PTYPE_LOAD)
+ continue;
+ if (ph->p_vaddr > kh->e_entry ||
+ (ph->p_vaddr + ph->p_memsz) < kh->e_entry)
+ continue;
+
+ /* Get our entry */
+ kernel_entry = kh->e_entry - ph->p_vaddr + ph->p_offset;
+ break;
+ }
+
+ if (!kernel_entry) {
+ prerror("INIT: Failed to find kernel entry !\n");
+ return false;
+ }
+
+ kernel_entry += load_base;
+ kernel_32bit = true;
+
+ printf("INIT: 32-bit kernel entry at 0x%llx\n", kernel_entry);
+
+ return true;
+}
+
+/* LID numbers. For now we hijack some of pHyp's own until I figure
+ * out the whole business with the MasterLID
+ */
+#define KERNEL_LID_PHYP 0x80a00701
+#define KERNEL_LID_OPAL 0x80f00101
+
+extern char __builtin_kernel_start[];
+extern char __builtin_kernel_end[];
+extern uint64_t boot_offset;
+
+static bool load_kernel(void)
+{
+ struct elf_hdr *kh;
+ uint32_t lid;
+ size_t ksize;
+ const char *ltype;
+
+ ltype = dt_prop_get_def(dt_root, "lid-type", NULL);
+
+ /* No lid-type, assume stradale, currently pre-loaded at fixed
+ * address
+ */
+ if (!ltype) {
+ printf("No lid-type property, assuming FSP-less setup\n");
+ ksize = __builtin_kernel_end - __builtin_kernel_start;
+ if (ksize) {
+ /* Move the built-in kernel up */
+ uint64_t builtin_base =
+ ((uint64_t)__builtin_kernel_start) -
+ SKIBOOT_BASE + boot_offset;
+ printf("Using built-in kernel\n");
+ memmove(KERNEL_LOAD_BASE, (void*)builtin_base, ksize);
+ } else
+ printf("Assuming kernel at 0x%p\n", KERNEL_LOAD_BASE);
+ } else {
+ ksize = KERNEL_LOAD_SIZE;
+
+ /* First try to load an OPAL secondary LID always */
+ lid = fsp_adjust_lid_side(KERNEL_LID_OPAL);
+ printf("Trying to load OPAL secondary LID...\n");
+ if (fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid, 0,
+ KERNEL_LOAD_BASE, &ksize) != 0) {
+ if (!strcmp(ltype, "opal")) {
+ prerror("Failed to load in OPAL mode...\n");
+ return false;
+ }
+ printf("Trying to load as PHYP LID...\n");
+ lid = fsp_adjust_lid_side(KERNEL_LID_PHYP);
+ ksize = KERNEL_LOAD_SIZE;
+ if (fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid, 0,
+ KERNEL_LOAD_BASE, &ksize) != 0) {
+ prerror("Failed to load kernel\n");
+ return false;
+ }
+ }
+ }
+
+ printf("INIT: Kernel loaded, size: %zu bytes (0 = unknown preload)\n",
+ ksize);
+
+ kh = (struct elf_hdr *)KERNEL_LOAD_BASE;
+ if (kh->ei_class == ELF_CLASS_64)
+ return try_load_elf64(kh);
+ else if (kh->ei_class == ELF_CLASS_32)
+ return try_load_elf32(kh);
+
+ printf("INIT: Neither ELF32 not ELF64 ?\n");
+ return false;
+}
+
+void __noreturn load_and_boot_kernel(bool is_reboot)
+{
+ const struct dt_property *memprop;
+ uint64_t mem_top;
+
+ memprop = dt_find_property(dt_root, DT_PRIVATE "maxmem");
+ if (memprop)
+ mem_top = (u64)dt_property_get_cell(memprop, 0) << 32
+ | dt_property_get_cell(memprop, 1);
+ else /* XXX HB hack, might want to calc it */
+ mem_top = 0x40000000;
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x000A);
+
+ /* Load kernel LID */
+ if (!load_kernel()) {
+ op_display(OP_FATAL, OP_MOD_INIT, 1);
+ abort();
+ }
+
+ if (!is_reboot) {
+ /* We wait for the nvram read to complete here so we can
+ * grab stuff from there such as the kernel arguments
+ */
+ fsp_nvram_wait_open();
+
+ /* Wait for FW VPD data read to complete */
+ fsp_code_update_wait_vpd(true);
+ }
+ fsp_console_select_stdout();
+
+ /*
+	 * The OCC takes a few seconds to boot. Call this as late
+	 * as possible to avoid delay.
+ */
+ occ_pstates_init();
+
+ /* Set kernel command line argument if specified */
+#ifdef KERNEL_COMMAND_LINE
+ dt_add_property_string(dt_chosen, "bootargs", KERNEL_COMMAND_LINE);
+#endif
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x000B);
+
+ /* Create the device tree blob to boot OS. */
+ fdt = create_dtb(dt_root);
+ if (!fdt) {
+ op_display(OP_FATAL, OP_MOD_INIT, 2);
+ abort();
+ }
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x000C);
+
+ /* Start the kernel */
+ if (!is_reboot)
+ op_panel_disable_src_echo();
+
+ /* Clear SRCs on the op-panel when Linux starts */
+ op_panel_clear_src();
+
+ cpu_give_self_os();
+
+ printf("INIT: Starting kernel at 0x%llx, fdt at %p (size 0x%x)\n",
+ kernel_entry, fdt, fdt_totalsize(fdt));
+
+ fdt_set_boot_cpuid_phys(fdt, this_cpu()->pir);
+ if (kernel_32bit)
+ start_kernel32(kernel_entry, fdt, mem_top);
+ start_kernel(kernel_entry, fdt, mem_top);
+}
+
+static void dt_fixups(void)
+{
+ struct dt_node *n;
+ struct dt_node *primary_lpc = NULL;
+
+ /* lpc node missing #address/size cells. Also pick one as
+ * primary for now (TBD: How to convey that from HB)
+ */
+ dt_for_each_compatible(dt_root, n, "ibm,power8-lpc") {
+ if (!primary_lpc || dt_has_node_property(n, "primary", NULL))
+ primary_lpc = n;
+ if (dt_has_node_property(n, "#address-cells", NULL))
+ break;
+ dt_add_property_cells(n, "#address-cells", 2);
+ dt_add_property_cells(n, "#size-cells", 1);
+ dt_add_property_strings(n, "status", "ok");
+ }
+
+ /* Missing "primary" property in LPC bus */
+ if (primary_lpc && !dt_has_node_property(primary_lpc, "primary", NULL))
+ dt_add_property(primary_lpc, "primary", NULL, 0);
+
+ /* Missing "scom-controller" */
+ dt_for_each_compatible(dt_root, n, "ibm,xscom") {
+ if (!dt_has_node_property(n, "scom-controller", NULL))
+ dt_add_property(n, "scom-controller", NULL, 0);
+ }
+}
+
+static void add_arch_vector(void)
+{
+ /**
+ * vec5 = a PVR-list : Number-of-option-vectors :
+ * option-vectors[Number-of-option-vectors + 1]
+ */
+ uint8_t vec5[] = {0x05, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00};
+
+ if (dt_has_node_property(dt_chosen, "ibm,architecture-vec-5", NULL))
+ return;
+
+ dt_add_property(dt_chosen, "ibm,architecture-vec-5",
+ vec5, sizeof(vec5));
+}
+
+static void dt_init_misc(void)
+{
+ /* Check if there's a /chosen node, if not, add one */
+ dt_chosen = dt_find_by_path(dt_root, "/chosen");
+ if (!dt_chosen)
+ dt_chosen = dt_new(dt_root, "chosen");
+ assert(dt_chosen);
+
+ /* Add IBM architecture vectors if needed */
+ add_arch_vector();
+
+ /* Add the "OPAL virtual ICS*/
+ add_ics_node();
+
+ /* Additional fixups. TODO: Move into platform */
+ dt_fixups();
+}
+
+/* Called from head.S, thus no prototype. */
+void main_cpu_entry(const void *fdt, u32 master_cpu);
+
+void __noreturn main_cpu_entry(const void *fdt, u32 master_cpu)
+{
+ /*
+	 * WARNING: At this point, the timebases have
+ * *not* been synchronized yet. Do not use any timebase
+ * related functions for timeouts etc... unless you can cope
+ * with the speed being some random core clock divider and
+ * the value jumping backward when the synchronization actually
+ * happens (in chiptod_init() below).
+ *
+ * Also the current cpu_thread() struct is not initialized
+	 * either, so we need to clear it out first thing (without
+	 * putting any other useful info in there just yet), otherwise
+	 * printf and locks are going to play funny games with "con_suspend"
+ */
+ pre_init_boot_cpu();
+
+ /*
+ * Before first printk, ensure console buffer is clear or
+ * reading tools might think it has wrapped
+ */
+ clear_console();
+
+ printf("SkiBoot %s starting...\n", gitid);
+
+ /* Initialize boot cpu's cpu_thread struct */
+ init_boot_cpu();
+
+ /* Now locks can be used */
+ init_locks();
+
+ /* Create the OPAL call table early on, entries can be overridden
+ * later on (FSP console code for example)
+ */
+ opal_table_init();
+
+ /*
+ * If we are coming in with a flat device-tree, we expand it
+ * now. Else look for HDAT and create a device-tree from them
+ *
+ * Hack alert: When entering via the OPAL entry point, fdt
+ * is set to -1, we record that and pass it to parse_hdat
+ */
+ if (fdt == (void *)-1ul)
+ parse_hdat(true, master_cpu);
+ else if (fdt == NULL)
+ parse_hdat(false, master_cpu);
+ else {
+ dt_expand(fdt);
+ }
+
+ /*
+ * From there, we follow a fairly strict initialization order.
+ *
+ * First we need to build up our chip data structures and initialize
+	 * XSCOM, which will be needed for a number of subsequent things.
+ *
+ * We want XSCOM available as early as the platform probe in case the
+ * probe requires some HW accesses.
+ *
+ * We also initialize the FSI master at that point in case we need
+ * to access chips via that path early on.
+ */
+ init_chips();
+ xscom_init();
+ mfsi_init();
+
+ /*
+ * Put various bits & pieces in device-tree that might not
+ * already be there such as the /chosen node if not there yet,
+ * the ICS node, etc... This can potentially use XSCOM
+ */
+ dt_init_misc();
+
+ /*
+	 * Initialize LPC (P8 only) so we can get to the UART, BMC and
+	 * other system controllers. This is done before probe_platform
+ * so that the platform probing code can access an external
+ * BMC if needed.
+ */
+ lpc_init();
+
+ /*
+ * Now, we init our memory map from the device-tree, and immediately
+ * reserve areas which we know might contain data coming from
+ * HostBoot. We need to do these things before we start doing
+ * allocations outside of our heap, such as chip local allocs,
+ * otherwise we might clobber those data.
+ */
+ mem_region_init();
+
+ /* Reserve HOMER and OCC area */
+ homer_init();
+
+ /* Initialize host services. */
+ hservices_init();
+
+ /*
+ * We probe the platform now. This means the platform probe gets
+ * the opportunity to reserve additional areas of memory if needed.
+ *
+ * Note: Timebases still not synchronized.
+ */
+ probe_platform();
+
+ /* Initialize the rest of the cpu thread structs */
+ init_all_cpus();
+
+ /* Add the /opal node to the device-tree */
+ add_opal_node();
+
+	/* Allocate our split trace buffers now. Depends on add_opal_node() */
+ init_trace_buffers();
+
+ /* Get the ICPs and make sure they are in a sane state */
+ init_interrupts();
+
+ /* Grab centaurs from device-tree if present (only on FSP-less) */
+ centaur_init();
+
+ /* Initialize PSI (depends on probe_platform being called) */
+ psi_init();
+
+ /* Call in secondary CPUs */
+ cpu_bringup();
+
+ /*
+	 * Synchronize timebases. This resets all the TB values to a small
+	 * value (so they appear to go backward at this point), and synchronizes
+	 * all core timebases to the global ChipTOD network
+ */
+ chiptod_init(master_cpu);
+
+ /*
+ * We have initialized the basic HW, we can now call into the
+ * platform to perform subsequent inits, such as establishing
+ * communication with the FSP.
+ */
+ if (platform.init)
+ platform.init();
+
+ /* Init SLW related stuff, including fastsleep */
+ slw_init();
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x0002);
+
+ /* Read in NVRAM and set it up */
+ nvram_init();
+
+ /* NX init */
+ nx_init();
+
+ /* Initialize the opal messaging */
+ opal_init_msg();
+
+ /* Probe IO hubs */
+ probe_p5ioc2();
+ probe_p7ioc();
+
+ /* Probe PHB3 on P8 */
+ probe_phb3();
+
+ /* Initialize PCI */
+ pci_init_slots();
+
+ /*
+ * These last few things must be done as late as possible
+	 * because they rely on various other things having been set up;
+	 * for example, add_opal_interrupts() will add all the interrupt
+	 * sources that are going to the firmware. We can't add a new one
+	 * after that call. Similarly, the mem_region calls will construct
+	 * the reserve maps in the DT, so we shouldn't affect the memory
+	 * regions after that.
+ */
+
+ /* Add the list of interrupts going to OPAL */
+ add_opal_interrupts();
+
+ /* Now release parts of memory nodes we haven't used ourselves... */
+ mem_region_release_unused();
+
+ /* ... and add remaining reservations to the DT */
+ mem_region_add_dt_reserved();
+
+ load_and_boot_kernel(false);
+}
+
+void __noreturn __secondary_cpu_entry(void)
+{
+ struct cpu_thread *cpu = this_cpu();
+
+ /* Secondary CPU called in */
+ cpu_callin(cpu);
+
+ /* Wait for work to do */
+ while(true) {
+ int i;
+
+ /* Process pending jobs on this processor */
+ cpu_process_jobs();
+
+ /* Relax a bit to give the simulator some breathing space */
+ i = 1000;
+ while (--i)
+ smt_very_low();
+ smt_low();
+ }
+}
+
+/* Called from head.S, thus no prototype. */
+void secondary_cpu_entry(void);
+
+void __noreturn secondary_cpu_entry(void)
+{
+ struct cpu_thread *cpu = this_cpu();
+
+ printf("INIT: CPU PIR 0x%04x called in\n", cpu->pir);
+
+ __secondary_cpu_entry();
+}
+
diff --git a/core/interrupts.c b/core/interrupts.c
new file mode 100644
index 0000000..cabebc2
--- /dev/null
+++ b/core/interrupts.c
@@ -0,0 +1,332 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <cpu.h>
+#include <fsp.h>
+#include <interrupts.h>
+#include <opal.h>
+#include <io.h>
+#include <cec.h>
+#include <device.h>
+#include <ccan/str/str.h>
+
+/* ICP registers */
+#define ICP_XIRR 0x4 /* 32-bit access */
+#define ICP_CPPR 0x4 /* 8-bit access */
+#define ICP_MFRR 0xc /* 8-bit access */
+
+struct irq_source {
+ uint32_t start;
+ uint32_t end;
+ const struct irq_source_ops *ops;
+ void *data;
+ struct list_node link;
+};
+
+static LIST_HEAD(irq_sources);
+static struct lock irq_lock = LOCK_UNLOCKED;
+
+void register_irq_source(const struct irq_source_ops *ops, void *data,
+ uint32_t start, uint32_t count)
+{
+ struct irq_source *is, *is1;
+
+ is = zalloc(sizeof(struct irq_source));
+ assert(is);
+ is->start = start;
+ is->end = start + count;
+ is->ops = ops;
+ is->data = data;
+
+ printf("IRQ: Registering %04x..%04x ops @%p (data %p) %s\n",
+ start, start + count - 1, ops, data,
+ ops->interrupt ? "[Internal]" : "[OS]");
+
+ lock(&irq_lock);
+ list_for_each(&irq_sources, is1, link) {
+ if (is->end > is1->start && is->start < is1->end) {
+ prerror("register IRQ source overlap !\n");
+ prerror(" new: %x..%x old: %x..%x\n",
+ is->start, is->end - 1,
+ is1->start, is1->end - 1);
+ assert(0);
+ }
+ }
+ list_add_tail(&irq_sources, &is->link);
+ unlock(&irq_lock);
+}
+
+void unregister_irq_source(uint32_t start, uint32_t count)
+{
+ struct irq_source *is;
+
+ lock(&irq_lock);
+ list_for_each(&irq_sources, is, link) {
+ if (start >= is->start && start < is->end) {
+ if (start != is->start ||
+ count != (is->end - is->start)) {
+ prerror("unregister IRQ source mismatch !\n");
+ prerror("start:%x, count: %x match: %x..%x\n",
+ start, count, is->start, is->end);
+ assert(0);
+ }
+ list_del(&is->link);
+ unlock(&irq_lock);
+ /* XXX Add synchronize / RCU */
+ free(is);
+ return;
+ }
+ }
+ unlock(&irq_lock);
+ prerror("unregister IRQ source not found !\n");
+ prerror("start:%x, count: %x\n", start, count);
+ assert(0);
+}
+
+/*
+ * This takes a 6-bit chip id and returns a 20 bit value representing
+ * the PSI interrupt. This includes all the fields above, ie, is a
+ * global interrupt number.
+ *
+ * For P8, this returns the base of the 8-interrupts block for PSI
+ */
+uint32_t get_psi_interrupt(uint32_t chip_id)
+{
+ uint32_t irq;
+
+ switch(proc_gen) {
+ case proc_gen_p7:
+ /* Get the chip ID into position, it already has
+ * the T bit so all we need is room for the GX
+ * bit, 9 bit BUID and 4 bit level
+ */
+ irq = chip_id << (1 + 9 + 4);
+
+ /* Add in the BUID */
+ irq |= P7_PSI_IRQ_BUID << 4;
+ break;
+ case proc_gen_p8:
+ irq = P8_CHIP_IRQ_BLOCK_BASE(chip_id, P8_IRQ_BLOCK_MISC);
+ irq += P8_IRQ_MISC_PSI_BASE;
+ break;
+ default:
+ assert(false);
+ };
+
+ return irq;
+}
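+
+/*
+ * Worked example for the P7 case above (numbers illustrative): with
+ * chip_id = 0x2, irq starts as 0x2 << 14; the BUID then occupies bits
+ * 4..12 and the low 4 bits remain free for the interrupt level within
+ * that BUID.
+ */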
+
+
+struct dt_node *add_ics_node(void)
+{
+ struct dt_node *ics = dt_new_addr(dt_root, "interrupt-controller", 0);
+ if (!ics)
+ return NULL;
+
+ dt_add_property_cells(ics, "reg", 0, 0, 0, 0);
+ dt_add_property_strings(ics, "compatible", "IBM,ppc-xics",
+ "IBM,opal-xics");
+ dt_add_property_cells(ics, "#address-cells", 0);
+ dt_add_property_cells(ics, "#interrupt-cells", 1);
+ dt_add_property_string(ics, "device_type",
+ "PowerPC-Interrupt-Source-Controller");
+ dt_add_property(ics, "interrupt-controller", NULL, 0);
+
+ return ics;
+}
+
+uint32_t get_ics_phandle(void)
+{
+ struct dt_node *i;
+
+ for (i = dt_first(dt_root); i; i = dt_next(dt_root, i)) {
+ if (streq(i->name, "interrupt-controller@0")) {
+ return i->phandle;
+ }
+ }
+ abort();
+}
+
+void add_opal_interrupts(void)
+{
+ struct irq_source *is;
+ unsigned int i, count = 0;
+ uint32_t *irqs = NULL, isn;
+
+ lock(&irq_lock);
+ list_for_each(&irq_sources, is, link) {
+ /*
+ * Add a source to opal-interrupts if it has an
+ * ->interrupt callback
+ */
+ if (!is->ops->interrupt)
+ continue;
+ for (isn = is->start; isn < is->end; isn++) {
+ i = count++;
+ irqs = realloc(irqs, 4 * count);
+ irqs[i] = isn;
+ }
+ }
+ unlock(&irq_lock);
+
+ /* The opal-interrupts property has one cell per interrupt,
+ * it is not a standard interrupt property
+ */
+ if (irqs)
+ dt_add_property(opal_node, "opal-interrupts", irqs, count * 4);
+}
+
+/*
+ * This is called at init time (and on fast reboot) to sanitize the
+ * ICP. We set our priority to 0 to mask all interrupts and make sure
+ * no IPI is on the way.
+ */
+void reset_cpu_icp(void)
+{
+ void *icp = this_cpu()->icp_regs;
+
+ assert(icp);
+
+ /* Clear pending IPIs */
+ out_8(icp + ICP_MFRR, 0xff);
+
+ /* Set priority to max, ignore all incoming interrupts, EOI IPIs */
+ out_be32(icp + ICP_XIRR, 2);
+}
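+
+/*
+ * Note on the XIRR write above: a 32-bit XIRR store sets CPPR from the
+ * top byte and EOIs the source named in the low 24 bits, so 0x00000002
+ * drops CPPR to 0 (masking everything) while EOIing XISR 2, which is
+ * conventionally the IPI source.
+ */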
+
+/* Used by the PSI code to send an EOI during reset. This will also
+ * set the CPPR to 0 which should already be the case anyway
+ */
+void icp_send_eoi(uint32_t interrupt)
+{
+ void *icp = this_cpu()->icp_regs;
+
+ assert(icp);
+
+ /* Set priority to max, ignore all incoming interrupts */
+ out_be32(icp + ICP_XIRR, interrupt & 0xffffff);
+}
+
+/* This is called before winkle, we clear pending IPIs and set our priority
+ * to 1 to mask all but the IPI
+ */
+void icp_prep_for_rvwinkle(void)
+{
+ void *icp = this_cpu()->icp_regs;
+
+ assert(icp);
+
+ /* Clear pending IPIs */
+ out_8(icp + ICP_MFRR, 0xff);
+
+ /* Set priority to 1, ignore all incoming interrupts, EOI IPIs */
+ out_be32(icp + ICP_XIRR, 0x01000002);
+}
+
+/* This is called to wakeup somebody from winkle */
+void icp_kick_cpu(struct cpu_thread *cpu)
+{
+ void *icp = cpu->icp_regs;
+
+ assert(icp);
+
+ /* Send high priority IPI */
+ out_8(icp + ICP_MFRR, 0);
+}
+
+static struct irq_source *irq_find_source(uint32_t isn)
+{
+ struct irq_source *is;
+
+ lock(&irq_lock);
+ list_for_each(&irq_sources, is, link) {
+ if (isn >= is->start && isn < is->end) {
+ unlock(&irq_lock);
+ return is;
+ }
+ }
+ unlock(&irq_lock);
+
+ return NULL;
+}
+
+static int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority)
+{
+ struct irq_source *is = irq_find_source(isn);
+
+ if (!is || !is->ops->set_xive)
+ return OPAL_PARAMETER;
+
+ return is->ops->set_xive(is->data, isn, server, priority);
+}
+opal_call(OPAL_SET_XIVE, opal_set_xive, 3);
+
+static int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority)
+{
+ struct irq_source *is = irq_find_source(isn);
+
+ if (!is || !is->ops->get_xive)
+ return OPAL_PARAMETER;
+
+ return is->ops->get_xive(is->data, isn, server, priority);
+}
+opal_call(OPAL_GET_XIVE, opal_get_xive, 3);
+
+static int64_t opal_handle_interrupt(uint32_t isn, uint64_t *outstanding_event_mask)
+{
+ struct irq_source *is = irq_find_source(isn);
+ int64_t rc = OPAL_SUCCESS;
+
+ if (!is || !is->ops->interrupt) {
+ rc = OPAL_PARAMETER;
+ goto bail;
+ }
+
+ is->ops->interrupt(is->data, isn);
+
+ /* Update output events */
+ bail:
+ if (outstanding_event_mask)
+ *outstanding_event_mask = opal_pending_events;
+
+ return rc;
+}
+opal_call(OPAL_HANDLE_INTERRUPT, opal_handle_interrupt, 2);
+
+void init_interrupts(void)
+{
+ struct dt_node *icp;
+ const struct dt_property *sranges;
+ struct cpu_thread *cpu;
+ u32 base, count, i;
+ u64 addr, size;
+
+ dt_for_each_compatible(dt_root, icp, "ibm,ppc-xicp") {
+ sranges = dt_require_property(icp,
+ "ibm,interrupt-server-ranges",
+ -1);
+ base = dt_get_number(sranges->prop, 1);
+ count = dt_get_number(sranges->prop + 4, 1);
+ for (i = 0; i < count; i++) {
+ addr = dt_get_address(icp, i, &size);
+ cpu = find_cpu_by_server(base + i);
+ if (cpu)
+ cpu->icp_regs = (void *)addr;
+ }
+ }
+}
+
diff --git a/core/lock.c b/core/lock.c
new file mode 100644
index 0000000..fc4bf6b
--- /dev/null
+++ b/core/lock.c
@@ -0,0 +1,125 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <lock.h>
+#include <assert.h>
+#include <processor.h>
+#include <cpu.h>
+#include <console.h>
+
+/* Set to bust locks. Note, this is initialized to true because our
+ * lock debugging code is not going to work until we have the per
+ * CPU data initialized
+ */
+bool bust_locks = true;
+
+#ifdef DEBUG_LOCKS
+
+static void lock_error(struct lock *l, const char *reason, uint16_t err)
+{
+ op_display(OP_FATAL, OP_MOD_LOCK, err);
+
+ fprintf(stderr, "LOCK ERROR: %s @%p (state: 0x%016lx)\n",
+ reason, l, l->lock_val);
+ abort();
+}
+
+static void lock_check(struct lock *l)
+{
+ if ((l->lock_val & 1) && (l->lock_val >> 32) == this_cpu()->pir)
+ lock_error(l, "Invalid recursive lock", 0);
+}
+
+static void unlock_check(struct lock *l)
+{
+ if (!(l->lock_val & 1))
+ lock_error(l, "Unlocking unlocked lock", 1);
+
+ if ((l->lock_val >> 32) != this_cpu()->pir)
+ lock_error(l, "Unlocked non-owned lock", 2);
+
+ if (l->in_con_path && this_cpu()->con_suspend == 0)
+ lock_error(l, "Unlock con lock with console not suspended", 3);
+}
+
+#else
+static inline void lock_check(struct lock *l) { }
+static inline void unlock_check(struct lock *l) { }
+#endif /* DEBUG_LOCKS */
+
+
+bool try_lock(struct lock *l)
+{
+ if (__try_lock(l)) {
+ if (l->in_con_path)
+ this_cpu()->con_suspend++;
+ return true;
+ }
+ return false;
+}
+
+void lock(struct lock *l)
+{
+ if (bust_locks)
+ return;
+
+ lock_check(l);
+ for (;;) {
+ if (try_lock(l))
+ break;
+ smt_low();
+ }
+ smt_medium();
+}
+
+void unlock(struct lock *l)
+{
+ struct cpu_thread *cpu = this_cpu();
+
+ if (bust_locks)
+ return;
+
+ unlock_check(l);
+
+ lwsync();
+ l->lock_val = 0;
+
+ if (l->in_con_path) {
+ cpu->con_suspend--;
+ if (cpu->con_suspend == 0 && cpu->con_need_flush)
+ flush_console();
+ }
+}
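+
+/*
+ * The lwsync above is the release barrier: it orders all stores done
+ * inside the critical section before the store that clears lock_val.
+ * The matching acquire side is assumed to live in __try_lock()
+ * (asm/lock.S).
+ */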
+
+bool lock_recursive(struct lock *l)
+{
+ if (bust_locks)
+ return false;
+
+ if ((l->lock_val & 1) &&
+ (l->lock_val >> 32) == this_cpu()->pir)
+ return false;
+
+ lock(l);
+ return true;
+}
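+
+/*
+ * Typical use sketch for lock_recursive(): paths that can be entered
+ * both with and without the lock already held:
+ *
+ *	bool taken = lock_recursive(&l);
+ *	... critical section ...
+ *	if (taken)
+ *		unlock(&l);
+ */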
+
+
+void init_locks(void)
+{
+ bust_locks = false;
+}
diff --git a/core/malloc.c b/core/malloc.c
new file mode 100644
index 0000000..692a501
--- /dev/null
+++ b/core/malloc.c
@@ -0,0 +1,84 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Wrappers for malloc, et. al. */
+#include <mem_region.h>
+#include <lock.h>
+#include <string.h>
+#include <mem_region-malloc.h>
+
+#define DEFAULT_ALIGN __alignof__(long)
+
+void *__memalign(size_t blocksize, size_t bytes, const char *location)
+{
+ void *p;
+
+ lock(&mem_region_lock);
+ p = mem_alloc(&skiboot_heap, bytes, blocksize, location);
+ unlock(&mem_region_lock);
+
+ return p;
+}
+
+void *__malloc(size_t bytes, const char *location)
+{
+ return __memalign(DEFAULT_ALIGN, bytes, location);
+}
+
+void __free(void *p, const char *location)
+{
+ lock(&mem_region_lock);
+ mem_free(&skiboot_heap, p, location);
+ unlock(&mem_region_lock);
+}
+
+void *__realloc(void *ptr, size_t size, const char *location)
+{
+ void *newptr;
+
+ /* Two classic malloc corner cases. */
+ if (!size) {
+ __free(ptr, location);
+ return NULL;
+ }
+ if (!ptr)
+ return __malloc(size, location);
+
+ lock(&mem_region_lock);
+ if (mem_resize(&skiboot_heap, ptr, size, location)) {
+ newptr = ptr;
+ } else {
+ newptr = mem_alloc(&skiboot_heap, size, DEFAULT_ALIGN,
+ location);
+ if (newptr) {
+ size_t copy = mem_size(&skiboot_heap, ptr);
+ if (copy > size)
+ copy = size;
+ memcpy(newptr, ptr, copy);
+ mem_free(&skiboot_heap, ptr, location);
+ }
+ }
+ unlock(&mem_region_lock);
+ return newptr;
+}
+
+void *__zalloc(size_t bytes, const char *location)
+{
+ void *p = __malloc(bytes, location);
+
+ if (p)
+ memset(p, 0, bytes);
+ return p;
+}
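+
+/*
+ * The plain malloc()/free()/realloc()/zalloc() names are assumed to be
+ * macros in mem_region-malloc.h that call these __ variants with the
+ * caller's file:line as 'location', which is what mem_dump_allocs()
+ * prints for each live block.
+ */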
diff --git a/core/mem_region.c b/core/mem_region.c
new file mode 100644
index 0000000..8904a18
--- /dev/null
+++ b/core/mem_region.c
@@ -0,0 +1,956 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <mem-map.h>
+#include <libfdt_env.h>
+#include <lock.h>
+#include <device.h>
+#include <cpu.h>
+#include <affinity.h>
+#include <types.h>
+#include <mem_region.h>
+#include <mem_region-malloc.h>
+
+/* Memory poisoning on free (if POISON_MEM_REGION set to 1) */
+#define POISON_MEM_REGION 0
+#define POISON_MEM_REGION_WITH 0x99
+#define POISON_MEM_REGION_LIMIT (1 * 1024 * 1024 * 1024)
+
+struct lock mem_region_lock = LOCK_UNLOCKED;
+
+static struct list_head regions = LIST_HEAD_INIT(regions);
+
+static struct mem_region skiboot_os_reserve = {
+ .name = "ibm,os-reserve",
+ .start = 0,
+ .len = SKIBOOT_BASE,
+ .type = REGION_OS,
+};
+
+struct mem_region skiboot_heap = {
+ .name = "ibm,firmware-heap",
+ .start = HEAP_BASE,
+ .len = HEAP_SIZE,
+ .type = REGION_SKIBOOT_HEAP,
+};
+
+static struct mem_region skiboot_code_and_text = {
+ .name = "ibm,firmware-code",
+ .start = SKIBOOT_BASE,
+ .len = HEAP_BASE - SKIBOOT_BASE,
+ .type = REGION_SKIBOOT_FIRMWARE,
+};
+
+static struct mem_region skiboot_after_heap = {
+ .name = "ibm,firmware-data",
+ .start = HEAP_BASE + HEAP_SIZE,
+ .len = SKIBOOT_BASE + SKIBOOT_SIZE - (HEAP_BASE + HEAP_SIZE),
+ .type = REGION_SKIBOOT_FIRMWARE,
+};
+
+static struct mem_region skiboot_cpu_stacks = {
+ .name = "ibm,firmware-stacks",
+ .start = CPU_STACKS_BASE,
+ .len = 0, /* TBA */
+ .type = REGION_SKIBOOT_FIRMWARE,
+};
+
+struct alloc_hdr {
+ bool free : 1;
+ bool prev_free : 1;
+ unsigned long num_longs : BITS_PER_LONG-2; /* Including header. */
+ const char *location;
+};
+
+struct free_hdr {
+ struct alloc_hdr hdr;
+ struct list_node list;
+ /* ... unsigned long tailer; */
+};
+
+#define ALLOC_HDR_LONGS (sizeof(struct alloc_hdr) / sizeof(long))
+#define ALLOC_MIN_LONGS (sizeof(struct free_hdr) / sizeof(long) + 1)
+
+/* Avoid ugly casts. */
+static void *region_start(const struct mem_region *region)
+{
+ return (void *)(unsigned long)region->start;
+}
+
+/* Each free block has a tailer, so we can walk backwards. */
+static unsigned long *tailer(struct free_hdr *f)
+{
+ return (unsigned long *)f + f->hdr.num_longs - 1;
+}
+
+/* This walks forward to the next hdr (or NULL if at the end). */
+static struct alloc_hdr *next_hdr(const struct mem_region *region,
+ const struct alloc_hdr *hdr)
+{
+ void *next;
+
+ next = ((unsigned long *)hdr + hdr->num_longs);
+ if (next >= region_start(region) + region->len)
+ next = NULL;
+ return next;
+}
+
+/* Creates free block covering entire region. */
+static void init_allocatable_region(struct mem_region *region)
+{
+ struct free_hdr *f = region_start(region);
+ assert(region->type == REGION_SKIBOOT_HEAP);
+ f->hdr.num_longs = region->len / sizeof(long);
+ f->hdr.free = true;
+ f->hdr.prev_free = false;
+ *tailer(f) = f->hdr.num_longs;
+ list_head_init(&region->free_list);
+ list_add(&region->free_list, &f->list);
+}
+
+static void make_free(struct mem_region *region, struct free_hdr *f,
+ const char *location)
+{
+ struct alloc_hdr *next;
+#if POISON_MEM_REGION == 1
+	size_t poison_size = (void *)tailer(f) - (void *)(f + 1);
+
+ /* We only poison up to a limit, as otherwise boot is kinda slow */
+ if (poison_size > POISON_MEM_REGION_LIMIT) {
+ poison_size = POISON_MEM_REGION_LIMIT;
+ }
+
+ memset(f+1, POISON_MEM_REGION_WITH, poison_size);
+#endif
+ if (f->hdr.prev_free) {
+ struct free_hdr *prev;
+ unsigned long *prev_tailer = (unsigned long *)f - 1;
+
+ assert(*prev_tailer);
+ prev = (void *)((unsigned long *)f - *prev_tailer);
+ assert(prev->hdr.free);
+ assert(!prev->hdr.prev_free);
+
+ /* Expand to cover the one we just freed. */
+ prev->hdr.num_longs += f->hdr.num_longs;
+ f = prev;
+ } else {
+ f->hdr.free = true;
+ f->hdr.location = location;
+ list_add(&region->free_list, &f->list);
+ }
+
+ /* Fix up tailer. */
+ *tailer(f) = f->hdr.num_longs;
+
+ /* If next is free, coalesce it */
+ next = next_hdr(region, &f->hdr);
+ if (next) {
+ next->prev_free = true;
+ if (next->free) {
+ struct free_hdr *next_free = (void *)next;
+ list_del_from(&region->free_list, &next_free->list);
+ /* Maximum of one level of recursion */
+ make_free(region, next_free, location);
+ }
+ }
+}
+
+/* Can we fit this many longs with this alignment in this free block? */
+static bool fits(struct free_hdr *f, size_t longs, size_t align, size_t *offset)
+{
+ *offset = 0;
+
+ while (f->hdr.num_longs >= *offset + longs) {
+ size_t addr;
+
+ addr = (unsigned long)f
+ + (*offset + ALLOC_HDR_LONGS) * sizeof(long);
+ if ((addr & (align - 1)) == 0)
+ return true;
+
+ /* Don't make tiny chunks! */
+ if (*offset == 0)
+ *offset = ALLOC_MIN_LONGS;
+ else
+ (*offset)++;
+ }
+ return false;
+}
+
+static void discard_excess(struct mem_region *region,
+ struct alloc_hdr *hdr, size_t alloc_longs,
+ const char *location)
+{
+ /* Do we have excess? */
+ if (hdr->num_longs > alloc_longs + ALLOC_MIN_LONGS) {
+ struct free_hdr *post;
+
+ /* Set up post block. */
+ post = (void *)hdr + alloc_longs * sizeof(long);
+ post->hdr.num_longs = hdr->num_longs - alloc_longs;
+ post->hdr.prev_free = false;
+
+ /* Trim our block. */
+ hdr->num_longs = alloc_longs;
+
+ /* This coalesces as required. */
+ make_free(region, post, location);
+ }
+}
+
+static const char *hdr_location(const struct alloc_hdr *hdr)
+{
+ /* Corrupt: step carefully! */
+ if (is_rodata(hdr->location))
+ return hdr->location;
+ return "*CORRUPT*";
+}
+
+static void bad_header(const struct mem_region *region,
+ const struct alloc_hdr *hdr,
+ const char *during,
+ const char *location)
+{
+ /* Corrupt: step carefully! */
+ if (is_rodata(hdr->location))
+ prerror("%p (in %s) %s at %s, previously %s\n",
+ hdr-1, region->name, during, location, hdr->location);
+ else
+ prerror("%p (in %s) %s at %s, previously %p\n",
+ hdr-1, region->name, during, location, hdr->location);
+ abort();
+}
+
+static bool region_is_reserved(struct mem_region *region)
+{
+ return region->type != REGION_OS;
+}
+
+static void mem_dump_allocs(void)
+{
+ struct mem_region *region;
+ struct alloc_hdr *hdr;
+
+ printf("Memory regions:\n");
+ list_for_each(&regions, region, list) {
+ if (region->type != REGION_SKIBOOT_HEAP)
+ continue;
+ printf(" 0x%012llx..%012llx : %s\n",
+ (long long)region->start,
+ (long long)(region->start + region->len - 1),
+ region->name);
+ if (region->free_list.n.next == NULL) {
+ printf(" no allocs\n");
+ continue;
+ }
+ for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) {
+ if (hdr->free)
+ continue;
+ printf(" 0x%.8lx %s\n", hdr->num_longs * sizeof(long),
+ hdr_location(hdr));
+ }
+ }
+}
+
+static void *__mem_alloc(struct mem_region *region, size_t size, size_t align,
+ const char *location)
+{
+ size_t alloc_longs, offset;
+ struct free_hdr *f;
+ struct alloc_hdr *next;
+
+ /* Align must be power of 2. */
+ assert(!((align - 1) & align));
+
+ /* This should be a constant. */
+ assert(is_rodata(location));
+
+ /* Unallocatable region? */
+ if (region->type != REGION_SKIBOOT_HEAP)
+ return NULL;
+
+ /* First allocation? */
+ if (region->free_list.n.next == NULL)
+ init_allocatable_region(region);
+
+ /* Don't do screwy sizes. */
+ if (size > region->len)
+ return NULL;
+
+ /* Don't do tiny alignments, we deal in long increments. */
+ if (align < sizeof(long))
+ align = sizeof(long);
+
+ /* Convert size to number of longs, too. */
+ alloc_longs = (size + sizeof(long)-1) / sizeof(long) + ALLOC_HDR_LONGS;
+
+ /* Can't be too small for when we free it, either. */
+ if (alloc_longs < ALLOC_MIN_LONGS)
+ alloc_longs = ALLOC_MIN_LONGS;
+
+ /* Walk free list. */
+ list_for_each(&region->free_list, f, list) {
+ /* We may have to skip some to meet alignment. */
+ if (fits(f, alloc_longs, align, &offset))
+ goto found;
+ }
+
+ return NULL;
+
+found:
+ assert(f->hdr.free);
+ assert(!f->hdr.prev_free);
+
+ /* This block is no longer free. */
+ list_del_from(&region->free_list, &f->list);
+ f->hdr.free = false;
+ f->hdr.location = location;
+
+ next = next_hdr(region, &f->hdr);
+ if (next) {
+ assert(next->prev_free);
+ next->prev_free = false;
+ }
+
+ if (offset != 0) {
+ struct free_hdr *pre = f;
+
+ f = (void *)f + offset * sizeof(long);
+ assert(f >= pre + 1);
+
+ /* Set up new header. */
+ f->hdr.num_longs = pre->hdr.num_longs - offset;
+ /* f->hdr.prev_free will be set by make_free below. */
+ f->hdr.free = false;
+ f->hdr.location = location;
+
+ /* Fix up old header. */
+ pre->hdr.num_longs = offset;
+ pre->hdr.prev_free = false;
+
+ /* This coalesces as required. */
+ make_free(region, pre, location);
+ }
+
+ /* We might be too long; put the rest back. */
+ discard_excess(region, &f->hdr, alloc_longs, location);
+
+ /* Clear tailer for debugging */
+ *tailer(f) = 0;
+
+ /* Their pointer is immediately after header. */
+ return &f->hdr + 1;
+}
+
+void *mem_alloc(struct mem_region *region, size_t size, size_t align,
+ const char *location)
+{
+ void *r = __mem_alloc(region, size, align, location);
+
+ if (r)
+ return r;
+
+ prerror("mem_alloc(0x%lx, 0x%lx, \"%s\") failed !\n",
+ size, align, location);
+ mem_dump_allocs();
+ return NULL;
+}
+
+void mem_free(struct mem_region *region, void *mem, const char *location)
+{
+ struct alloc_hdr *hdr;
+
+ /* This should be a constant. */
+ assert(is_rodata(location));
+
+ /* Freeing NULL is always a noop. */
+ if (!mem)
+ return;
+
+ /* Your memory is in the region, right? */
+ assert(mem >= region_start(region) + sizeof(*hdr));
+ assert(mem < region_start(region) + region->len);
+
+ /* Grab header. */
+ hdr = mem - sizeof(*hdr);
+
+ if (hdr->free)
+ bad_header(region, hdr, "re-freed", location);
+
+ make_free(region, (struct free_hdr *)hdr, location);
+}
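+
+/*
+ * Usage sketch (illustrative, not part of this patch).  Callers other
+ * than the malloc()/free() wrappers, which acquire mem_region_lock
+ * themselves, must hold the lock around these calls, as new_region()
+ * and add_region() below do:
+ *
+ *	lock(&mem_region_lock);
+ *	void *buf = mem_alloc(&skiboot_heap, 128, 16, __location__);
+ *	if (buf)
+ *		mem_free(&skiboot_heap, buf, __location__);
+ *	unlock(&mem_region_lock);
+ */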
+
+size_t mem_size(const struct mem_region *region __unused, const void *ptr)
+{
+ const struct alloc_hdr *hdr = ptr - sizeof(*hdr);
+ return hdr->num_longs * sizeof(long);
+}
+
+bool mem_resize(struct mem_region *region, void *mem, size_t len,
+ const char *location)
+{
+ struct alloc_hdr *hdr, *next;
+ struct free_hdr *f;
+
+ /* This should be a constant. */
+ assert(is_rodata(location));
+
+ /* Get header. */
+ hdr = mem - sizeof(*hdr);
+ if (hdr->free)
+ bad_header(region, hdr, "resize", location);
+
+ /* Convert to number of longs, header included, rounding up. */
+ len = (sizeof(*hdr) + len + sizeof(long) - 1) / sizeof(long);
+
+ /* Can't be too small for when we free it, either. */
+ if (len < ALLOC_MIN_LONGS)
+ len = ALLOC_MIN_LONGS;
+
+ /* Shrinking is simple. */
+ if (len <= hdr->num_longs) {
+ hdr->location = location;
+ discard_excess(region, hdr, len, location);
+ return true;
+ }
+
+ /* Check if we can expand. */
+ next = next_hdr(region, hdr);
+ if (!next || !next->free || hdr->num_longs + next->num_longs < len)
+ return false;
+
+ /* OK, it's free and big enough, absorb it. */
+ f = (struct free_hdr *)next;
+ list_del_from(&region->free_list, &f->list);
+ hdr->num_longs += next->num_longs;
+ hdr->location = location;
+
+ /* Update next prev_free */
+ next = next_hdr(region, &f->hdr);
+ if (next) {
+ assert(next->prev_free);
+ next->prev_free = false;
+ }
+
+ /* Clear tailer for debugging */
+ *tailer(f) = 0;
+
+ /* Now we might have *too* much. */
+ discard_excess(region, hdr, len, location);
+ return true;
+}
+
+bool mem_check(const struct mem_region *region)
+{
+ size_t frees = 0;
+ struct alloc_hdr *hdr, *prev_free = NULL;
+ struct free_hdr *f;
+
+ /* Check it's sanely aligned. */
+ if (region->start % sizeof(struct alloc_hdr)) {
+ prerror("Region '%s' not sanely aligned (%llx)\n",
+ region->name, (unsigned long long)region->start);
+ return false;
+ }
+ if ((long)region->len % sizeof(struct alloc_hdr)) {
+ prerror("Region '%s' not sane length (%llu)\n",
+ region->name, (unsigned long long)region->len);
+ return false;
+ }
+
+ /* Not ours to play with, or empty? Don't do anything. */
+ if (region->type != REGION_SKIBOOT_HEAP ||
+ region->free_list.n.next == NULL)
+ return true;
+
+ /* Walk linearly. */
+ for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) {
+ if (hdr->num_longs < ALLOC_MIN_LONGS) {
+ prerror("Region '%s' %s %p (%s) size %zu\n",
+ region->name, hdr->free ? "free" : "alloc",
+ hdr, hdr_location(hdr),
+ hdr->num_longs * sizeof(long));
+ return false;
+ }
+ if ((unsigned long)hdr + hdr->num_longs * sizeof(long) >
+ region->start + region->len) {
+ prerror("Region '%s' %s %p (%s) oversize %zu\n",
+ region->name, hdr->free ? "free" : "alloc",
+ hdr, hdr_location(hdr),
+ hdr->num_longs * sizeof(long));
+ return false;
+ }
+ if (hdr->free) {
+ if (hdr->prev_free || prev_free) {
+ prerror("Region '%s' free %p (%s) has prev_free"
+ " %p (%s) %sset?\n",
+ region->name, hdr, hdr_location(hdr),
+ prev_free,
+ prev_free ? hdr_location(prev_free)
+ : "NULL",
+ hdr->prev_free ? "" : "un");
+ return false;
+ }
+ prev_free = hdr;
+ frees ^= (unsigned long)hdr - region->start;
+ } else {
+ if (hdr->prev_free != (bool)prev_free) {
+ prerror("Region '%s' alloc %p (%s) has"
+ " prev_free %p %sset?\n",
+ region->name, hdr, hdr_location(hdr),
+ prev_free, hdr->prev_free ? "" : "un");
+ return false;
+ }
+ prev_free = NULL;
+ }
+ }
+
+ /* Now walk free list. */
+ list_for_each(&region->free_list, f, list)
+ frees ^= (unsigned long)f - region->start;
+
+ if (frees) {
+ prerror("Region '%s' free list and walk do not match!\n",
+ region->name);
+ return false;
+ }
+ return true;
+}
+
+static struct mem_region *new_region(const char *name,
+ uint64_t start, uint64_t len,
+ struct dt_node *mem_node,
+ enum mem_region_type type)
+{
+ struct mem_region *region;
+
+ /* Avoid lock recursion, call mem_alloc directly. */
+ region = mem_alloc(&skiboot_heap,
+ sizeof(*region), __alignof__(*region), __location__);
+ if (!region)
+ return NULL;
+
+ region->name = name;
+ region->start = start;
+ region->len = len;
+ region->mem_node = mem_node;
+ region->type = type;
+ region->free_list.n.next = NULL;
+
+ return region;
+}
+
+/* We always split regions, so we only have to replace one. */
+static struct mem_region *split_region(struct mem_region *head,
+ uint64_t split_at,
+ enum mem_region_type type)
+{
+ struct mem_region *tail;
+ uint64_t end = head->start + head->len;
+
+ tail = new_region(head->name, split_at, end - split_at,
+ head->mem_node, type);
+ /* Original region becomes head. */
+ if (tail)
+ head->len -= tail->len;
+
+ return tail;
+}
+
+static bool intersects(const struct mem_region *region, uint64_t addr)
+{
+ return addr > region->start &&
+ addr < region->start + region->len;
+}
+
+static bool maybe_split(struct mem_region *r, uint64_t split_at)
+{
+ struct mem_region *tail;
+
+ if (!intersects(r, split_at))
+ return true;
+
+ tail = split_region(r, split_at, r->type);
+ if (!tail)
+ return false;
+
+ /* Tail add is important: we may need to split again! */
+ list_add_tail(&regions, &tail->list);
+ return true;
+}
+
+static bool overlaps(const struct mem_region *r1, const struct mem_region *r2)
+{
+ return (r1->start + r1->len > r2->start
+ && r1->start < r2->start + r2->len);
+}
+
+static struct mem_region *get_overlap(const struct mem_region *region)
+{
+ struct mem_region *i;
+
+ list_for_each(&regions, i, list) {
+ if (overlaps(region, i))
+ return i;
+ }
+ return NULL;
+}
+
+static bool add_region(struct mem_region *region)
+{
+ struct mem_region *r;
+
+ /* First split any regions which intersect. */
+ list_for_each(&regions, r, list)
+ if (!maybe_split(r, region->start) ||
+ !maybe_split(r, region->start + region->len))
+ return false;
+
+ /* Now we have only whole overlaps, if any. */
+ while ((r = get_overlap(region)) != NULL) {
+ assert(r->start == region->start);
+ assert(r->len == region->len);
+ list_del_from(&regions, &r->list);
+ /* We already hold mem_region lock */
+ mem_free(&skiboot_heap, r, __location__);
+ }
+
+ /* Finally, add in our own region. */
+ list_add(&regions, &region->list);
+ return true;
+}
+
+void mem_reserve(const char *name, uint64_t start, uint64_t len)
+{
+ struct mem_region *region;
+ bool added;
+
+ lock(&mem_region_lock);
+ region = new_region(name, start, len, NULL, REGION_RESERVED);
+ assert(region);
+ added = add_region(region);
+ assert(added);
+ unlock(&mem_region_lock);
+}
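+
+/*
+ * Worked example (illustrative): reserving [0x2000, 0x3000) out of an
+ * existing region covering [0x0, 0x10000) splits it at 0x2000 and at
+ * 0x3000, so only whole-region overlaps remain; the middle piece then
+ * exactly matches the reservation, is deleted and freed, and the new
+ * REGION_RESERVED region takes its place.
+ */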
+
+static bool matches_chip_id(const __be32 ids[], size_t num, u32 chip_id)
+{
+ size_t i;
+
+ for (i = 0; i < num; i++)
+ if (be32_to_cpu(ids[i]) == chip_id)
+ return true;
+
+ return false;
+}
+
+void *__local_alloc(unsigned int chip_id, size_t size, size_t align,
+ const char *location)
+{
+ struct mem_region *region;
+ void *p = NULL;
+ bool use_local = true;
+
+ lock(&mem_region_lock);
+
+restart:
+ list_for_each(&regions, region, list) {
+ const struct dt_property *prop;
+ const __be32 *ids;
+
+ if (region->type != REGION_SKIBOOT_HEAP)
+ continue;
+
+ /* Don't allocate from normal heap. */
+ if (region == &skiboot_heap)
+ continue;
+
+ /* First pass, only match node local regions */
+ if (use_local) {
+ if (!region->mem_node)
+ continue;
+ prop = dt_find_property(region->mem_node, "ibm,chip-id");
+ ids = (const __be32 *)prop->prop;
+ if (!matches_chip_id(ids, prop->len/sizeof(u32),
+ chip_id))
+ continue;
+ }
+
+ /* Second pass, match anything */
+ p = mem_alloc(region, size, align, location);
+ if (p)
+ break;
+ }
+
+ /*
+ * If we can't allocate the memory block from the expected
+ * node, we fall back to any node that can accommodate our request.
+ */
+ if (!p && use_local) {
+ use_local = false;
+ goto restart;
+ }
+
+ unlock(&mem_region_lock);
+
+ return p;
+}
+
+struct mem_region *find_mem_region(const char *name)
+{
+ struct mem_region *region;
+
+ list_for_each(&regions, region, list) {
+ if (streq(region->name, name))
+ return region;
+ }
+ return NULL;
+}
+
+/* Trawl through device tree, create memory regions from nodes. */
+void mem_region_init(void)
+{
+ const struct dt_property *names, *ranges;
+ struct mem_region *region;
+ struct dt_node *i;
+
+ /* Ensure we have no collision between skiboot core and our heap */
+ extern char _end[];
+ BUILD_ASSERT(HEAP_BASE >= (uint64_t)_end);
+
+ /*
+ * Add associativity properties outside of the lock
+ * to avoid recursive locking caused by allocations
+ * done by add_chip_dev_associativity()
+ */
+ dt_for_each_node(dt_root, i) {
+ if (!dt_has_node_property(i, "device_type", "memory"))
+ continue;
+
+ /* Add associativity properties */
+ add_chip_dev_associativity(i);
+ }
+
+ /* Add each memory node. */
+ dt_for_each_node(dt_root, i) {
+ uint64_t start, len;
+ char *rname;
+#define NODE_REGION_PREFIX "ibm,firmware-allocs-"
+
+ if (!dt_has_node_property(i, "device_type", "memory"))
+ continue;
+ rname = zalloc(strlen(i->name) + strlen(NODE_REGION_PREFIX) + 1);
+ strcat(rname, NODE_REGION_PREFIX);
+ strcat(rname, i->name);
+ start = dt_get_address(i, 0, &len);
+ lock(&mem_region_lock);
+ region = new_region(rname, start, len, i, REGION_SKIBOOT_HEAP);
+ if (!region) {
+ prerror("MEM: Could not add mem region %s!\n", i->name);
+ abort();
+ }
+ list_add(&regions, &region->list);
+ unlock(&mem_region_lock);
+ }
+
+ /* Now we know how many CPU stacks we have, fix that up. */
+ skiboot_cpu_stacks.len = (cpu_max_pir + 1) * STACK_SIZE;
+
+ lock(&mem_region_lock);
+
+ /* Now carve out our own reserved areas. */
+ if (!add_region(&skiboot_os_reserve) ||
+ !add_region(&skiboot_code_and_text) ||
+ !add_region(&skiboot_heap) ||
+ !add_region(&skiboot_after_heap) ||
+ !add_region(&skiboot_cpu_stacks)) {
+ prerror("Out of memory adding skiboot reserved areas\n");
+ abort();
+ }
+
+ /* Add reserved ranges from the DT */
+ names = dt_find_property(dt_root, "reserved-names");
+ ranges = dt_find_property(dt_root, "reserved-ranges");
+ if (names && ranges) {
+ const uint64_t *range;
+ int n, len;
+
+ range = (const void *)ranges->prop;
+
+ for (n = 0; n < names->len; n += len, range += 2) {
+ char *name;
+
+ len = strlen(names->prop + n) + 1;
+
+ name = mem_alloc(&skiboot_heap, len,
+ __alignof__(*name), __location__);
+ memcpy(name, names->prop + n, len);
+
+ region = new_region(name,
+ dt_get_number(range, 2),
+ dt_get_number(range + 1, 2),
+ NULL, REGION_RESERVED);
+ list_add(&regions, &region->list);
+ }
+ } else if (names || ranges) {
+ prerror("Invalid properties: reserved-names=%p "
+ "with reserved-ranges=%p\n",
+ names, ranges);
+ abort();
+ }
+
+ unlock(&mem_region_lock);
+
+ /* We generate the reservation properties from our own region list,
+ * which now includes the existing data.
+ */
+ if (names)
+ dt_del_property(dt_root, (struct dt_property *)names);
+ if (ranges)
+ dt_del_property(dt_root, (struct dt_property *)ranges);
+}
+
+static uint64_t allocated_length(const struct mem_region *r)
+{
+ struct free_hdr *f, *last = NULL;
+
+ /* No allocations at all? */
+ if (r->free_list.n.next == NULL)
+ return 0;
+
+ /* Find last free block. */
+ list_for_each(&r->free_list, f, list)
+ if (f > last)
+ last = f;
+
+ /* No free blocks? */
+ if (!last)
+ return r->len;
+
+ /* Last free block isn't at end? */
+ if (next_hdr(r, &last->hdr))
+ return r->len;
+ return (unsigned long)last - r->start;
+}
+
+/* Separate out allocated sections into their own region. */
+void mem_region_release_unused(void)
+{
+ struct mem_region *r;
+
+ lock(&mem_region_lock);
+
+ printf("Releasing unused memory:\n");
+ list_for_each(&regions, r, list) {
+ uint64_t used_len;
+
+ /* If it's not allocatable, ignore it. */
+ if (r->type != REGION_SKIBOOT_HEAP)
+ continue;
+
+ used_len = allocated_length(r);
+
+ printf(" %s: %llu/%llu used\n",
+ r->name, (long long)used_len, (long long)r->len);
+
+ /* We keep the skiboot heap. */
+ if (r == &skiboot_heap)
+ continue;
+
+ /* Nothing used? Whole thing is for Linux. */
+ if (used_len == 0)
+ r->type = REGION_OS;
+ /* Partially used? Split region. */
+ else if (used_len != r->len) {
+ struct mem_region *for_linux;
+ struct free_hdr *last = region_start(r) + used_len;
+
+ /* Remove the final free block. */
+ list_del_from(&r->free_list, &last->list);
+
+ for_linux = split_region(r, r->start + used_len,
+ REGION_OS);
+ if (!for_linux) {
+ prerror("OOM splitting mem node %s for linux\n",
+ r->name);
+ abort();
+ }
+ list_add(&regions, &for_linux->list);
+ }
+ }
+ unlock(&mem_region_lock);
+}
+
+void mem_region_add_dt_reserved(void)
+{
+ int names_len, ranges_len, len;
+ struct mem_region *region;
+ void *names, *ranges;
+ uint64_t *range;
+ char *name;
+
+ names_len = 0;
+ ranges_len = 0;
+
+ lock(&mem_region_lock);
+
+ /* First pass: calculate length of property data */
+ list_for_each(&regions, region, list) {
+ if (!region_is_reserved(region))
+ continue;
+ names_len += strlen(region->name) + 1;
+ ranges_len += 2 * sizeof(uint64_t);
+ }
+
+ /* Allocate property data with mem_alloc; malloc() acquires
+ * mem_region_lock */
+ names = mem_alloc(&skiboot_heap, names_len,
+ __alignof__(*names), __location__);
+ ranges = mem_alloc(&skiboot_heap, ranges_len,
+ __alignof__(*ranges), __location__);
+
+ name = names;
+ range = ranges;
+
+ printf("Reserved regions:\n");
+ /* Second pass: populate property data */
+ list_for_each(&regions, region, list) {
+ if (!region_is_reserved(region))
+ continue;
+ len = strlen(region->name) + 1;
+ memcpy(name, region->name, len);
+ name += len;
+
+ printf(" 0x%012llx..%012llx : %s\n",
+ (long long)region->start,
+ (long long)(region->start + region->len - 1),
+ region->name);
+
+ range[0] = cpu_to_fdt64(region->start);
+ range[1] = cpu_to_fdt64(region->len);
+ range += 2;
+ }
+ unlock(&mem_region_lock);
+
+ dt_add_property(dt_root, "reserved-names", names, names_len);
+ dt_add_property(dt_root, "reserved-ranges", ranges, ranges_len);
+
+ free(names);
+ free(ranges);
+}
diff --git a/core/nvram.c b/core/nvram.c
new file mode 100644
index 0000000..f25d6aa
--- /dev/null
+++ b/core/nvram.c
@@ -0,0 +1,248 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <opal.h>
+#include <lock.h>
+#include <device.h>
+#include <platform.h>
+
+static void *nvram_image;
+static uint32_t nvram_size;
+static bool nvram_ready;
+
+static int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset)
+{
+ if (!nvram_ready)
+ return OPAL_HARDWARE;
+ if (offset >= nvram_size || (offset + size) > nvram_size)
+ return OPAL_PARAMETER;
+
+ memcpy((void *)buffer, nvram_image + offset, size);
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_READ_NVRAM, opal_read_nvram, 3);
+
+static int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset)
+{
+ if (!nvram_ready)
+ return OPAL_HARDWARE;
+ if (offset >= nvram_size || (offset + size) > nvram_size)
+ return OPAL_PARAMETER;
+ memcpy(nvram_image + offset, (void *)buffer, size);
+ if (platform.nvram_write)
+ platform.nvram_write(offset, nvram_image + offset, size);
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_WRITE_NVRAM, opal_write_nvram, 3);
+
+struct chrp_nvram_hdr {
+ uint8_t sig;
+ uint8_t cksum;
+ uint16_t len;
+ char name[12];
+};
+
+#define NVRAM_SIG_FW_PRIV 0x51
+#define NVRAM_SIG_SYSTEM 0x70
+#define NVRAM_SIG_FREE 0x7f
+
+#define NVRAM_NAME_COMMON "common"
+#define NVRAM_NAME_FW_PRIV "ibm,skiboot"
+#define NVRAM_NAME_FREE "wwwwwwwwwwww"
+
+/* 64k should be enough, famous last words... */
+#define NVRAM_SIZE_COMMON 0x10000
+
+/* 4k should be enough, famous last words... */
+#define NVRAM_SIZE_FW_PRIV 0x1000
+
+static uint8_t chrp_nv_cksum(struct chrp_nvram_hdr *hdr)
+{
+ struct chrp_nvram_hdr h_copy = *hdr;
+ uint8_t b_data, i_sum, c_sum;
+ uint8_t *p = (uint8_t *)&h_copy;
+ unsigned int nbytes = sizeof(h_copy);
+
+ h_copy.cksum = 0;
+ for (c_sum = 0; nbytes; nbytes--) {
+ b_data = *(p++);
+ i_sum = c_sum + b_data;
+ if (i_sum < c_sum)
+ i_sum++;
+ c_sum = i_sum;
+ }
+ return c_sum;
+}
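+
+/*
+ * Worked example (illustrative): the checksum is an 8-bit sum with
+ * end-around carry.  With a running sum of 0xff, adding 0x02 wraps to
+ * 0x01; the wrap is detected by i_sum < c_sum and the carry is folded
+ * back in, giving 0x02.
+ */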
+
+static void nvram_format(void)
+{
+ struct chrp_nvram_hdr *h;
+ unsigned int offset = 0;
+
+ prerror("NVRAM: Re-initializing\n");
+ memset(nvram_image, 0, nvram_size);
+
+ /* Create private partition */
+ h = nvram_image + offset;
+ h->sig = NVRAM_SIG_FW_PRIV;
+ h->len = NVRAM_SIZE_FW_PRIV >> 4;
+ strcpy(h->name, NVRAM_NAME_FW_PRIV);
+ h->cksum = chrp_nv_cksum(h);
+ offset += NVRAM_SIZE_FW_PRIV;
+
+ /* Create common partition */
+ h = nvram_image + offset;
+ h->sig = NVRAM_SIG_SYSTEM;
+ h->len = NVRAM_SIZE_COMMON >> 4;
+ strcpy(h->name, NVRAM_NAME_COMMON);
+ h->cksum = chrp_nv_cksum(h);
+ offset += NVRAM_SIZE_COMMON;
+
+ /* Create free space partition */
+ h = nvram_image + offset;
+ h->sig = NVRAM_SIG_FREE;
+ h->len = (nvram_size - offset) >> 4;
+ strncpy(h->name, NVRAM_NAME_FREE, 12);
+ h->cksum = chrp_nv_cksum(h);
+
+ /* Write the whole thing back */
+ if (platform.nvram_write)
+ platform.nvram_write(0, nvram_image, nvram_size);
+}
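+
+/*
+ * Resulting layout, derived from the sizes above (partition lengths
+ * are stored divided by 16):
+ *
+ *   0x00000  ibm,skiboot  sig 0x51  4KB
+ *   0x01000  common       sig 0x70  64KB
+ *   0x11000  free space   sig 0x7f  to end of nvram
+ */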
+
+/*
+ * Check that the nvram partition layout is sane and that it
+ * contains our required partitions. If not, we re-format the
+ * lot of it.
+ */
+static void nvram_check(void)
+{
+ unsigned int offset = 0;
+ bool found_common = false;
+ bool found_skiboot = false;
+
+ while (offset + sizeof(struct chrp_nvram_hdr) < nvram_size) {
+ struct chrp_nvram_hdr *h = nvram_image + offset;
+
+ if (chrp_nv_cksum(h) != h->cksum) {
+ prerror("NVRAM: Partition at offset 0x%x"
+ " has bad checksum\n", offset);
+ goto failed;
+ }
+ if (h->len < 1) {
+ prerror("NVRAM: Partition at offset 0x%x"
+ " has incorrect 0 length\n", offset);
+ goto failed;
+ }
+
+ if (h->sig == NVRAM_SIG_SYSTEM &&
+ strcmp(h->name, NVRAM_NAME_COMMON) == 0)
+ found_common = true;
+
+ if (h->sig == NVRAM_SIG_FW_PRIV &&
+ strcmp(h->name, NVRAM_NAME_FW_PRIV) == 0)
+ found_skiboot = true;
+
+ offset += h->len << 4;
+ if (offset > nvram_size) {
+ prerror("NVRAM: Partition at offset 0x%x"
+ " extends beyond end of nvram !\n", offset);
+ goto failed;
+ }
+ }
+ if (!found_common) {
+ prerror("NVRAM: Common partition not found !\n");
+ goto failed;
+ }
+ if (!found_skiboot) {
+ prerror("NVRAM: Skiboot private partition "
+ "not found !\n");
+ goto failed;
+ }
+
+ prerror("NVRAM: Layout appears sane\n");
+ return;
+ failed:
+ nvram_format();
+}
+
+void nvram_read_complete(bool success)
+{
+ struct dt_node *np;
+
+ /* Read not successful, error out and free the buffer */
+ if (!success) {
+ free(nvram_image);
+ nvram_size = 0;
+ return;
+ }
+
+ /* Check and maybe format nvram */
+ nvram_check();
+
+ /* Add nvram node */
+ np = dt_new(opal_node, "nvram");
+ dt_add_property_cells(np, "#bytes", nvram_size);
+ dt_add_property_string(np, "compatible", "ibm,opal-nvram");
+
+ /* Mark ready */
+ nvram_ready = true;
+}
+
+void nvram_init(void)
+{
+ int rc;
+
+ if (!platform.nvram_info)
+ return;
+ rc = platform.nvram_info(&nvram_size);
+ if (rc) {
+ prerror("NVRAM: Error %d retrieving nvram info\n", rc);
+ return;
+ }
+ printf("NVRAM: Size is %d KB\n", nvram_size >> 10);
+ if (nvram_size > 0x100000) {
+ printf("NVRAM: Cropping to 1MB !\n");
+ nvram_size = 0x100000;
+ }
+
+ /*
+ * We allocate the nvram image with 4k alignment to make the
+ * FSP backend's job easier.
+ */
+ nvram_image = memalign(0x1000, nvram_size);
+ if (!nvram_image) {
+ prerror("NVRAM: Failed to allocate nvram image\n");
+ nvram_size = 0;
+ return;
+ }
+
+ /* Read it in */
+ rc = platform.nvram_start_read(nvram_image, 0, nvram_size);
+ if (rc) {
+ prerror("NVRAM: Failed to read NVRAM from FSP !\n");
+ nvram_size = 0;
+ free(nvram_image);
+ return;
+ }
+
+ /*
+ * We'll get called back later (or recursively from
+ * nvram_start_read) in nvram_read_complete()
+ */
+}
diff --git a/core/opal-msg.c b/core/opal-msg.c
new file mode 100644
index 0000000..f033b76
--- /dev/null
+++ b/core/opal-msg.c
@@ -0,0 +1,167 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <opal-msg.h>
+#include <lock.h>
+
+#define OPAL_MAX_MSGS (OPAL_MSG_TYPE_MAX + OPAL_MAX_ASYNC_COMP - 1)
+#define OPAL_MSG_PREFIX "opalmsg: "
+
+
+struct opal_msg_entry {
+ struct list_node link;
+ void (*consumed)(void *data);
+ void *data;
+ struct opal_msg msg;
+};
+
+static LIST_HEAD(msg_free_list);
+static LIST_HEAD(msg_pending_list);
+
+static struct lock opal_msg_lock = LOCK_UNLOCKED;
+
+int _opal_queue_msg(enum OpalMessageType msg_type, void *data,
+ void (*consumed)(void *data), size_t num_params,
+ const u64 *params)
+{
+ struct opal_msg_entry *entry;
+
+ lock(&opal_msg_lock);
+
+ entry = list_pop(&msg_free_list, struct opal_msg_entry, link);
+ if (!entry) {
+ prerror(OPAL_MSG_PREFIX "No available node in the free list, allocating\n");
+ entry = zalloc(sizeof(struct opal_msg_entry));
+ if (!entry) {
+ prerror(OPAL_MSG_PREFIX "Allocation failed\n");
+ unlock(&opal_msg_lock);
+ return OPAL_RESOURCE;
+ }
+ }
+
+ entry->consumed = consumed;
+ entry->data = data;
+ entry->msg.msg_type = msg_type;
+
+ if (num_params > ARRAY_SIZE(entry->msg.params)) {
+ prerror(OPAL_MSG_PREFIX "Discarding extra parameters\n");
+ num_params = ARRAY_SIZE(entry->msg.params);
+ }
+ memcpy(entry->msg.params, params, num_params*sizeof(u64));
+
+ list_add_tail(&msg_pending_list, &entry->link);
+ opal_update_pending_evt(OPAL_EVENT_MSG_PENDING,
+ OPAL_EVENT_MSG_PENDING);
+
+ unlock(&opal_msg_lock);
+
+ return 0;
+}
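+
+/*
+ * Call sketch (illustrative; 'token' and 'rc' are made-up locals): to
+ * post an async completion that opal_check_completion() below can
+ * match, params[0] must carry the token:
+ *
+ *	u64 params[2] = { token, rc };
+ *	_opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, 2, params);
+ */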
+
+static int64_t opal_get_msg(uint64_t *buffer, uint64_t size)
+{
+ struct opal_msg_entry *entry;
+ void (*callback)(void *data);
+ void *data;
+
+ if (size < sizeof(struct opal_msg) || !buffer)
+ return OPAL_PARAMETER;
+
+ lock(&opal_msg_lock);
+
+ entry = list_pop(&msg_pending_list, struct opal_msg_entry, link);
+ if (!entry) {
+ unlock(&opal_msg_lock);
+ return OPAL_RESOURCE;
+ }
+
+ memcpy(buffer, &entry->msg, sizeof(entry->msg));
+ callback = entry->consumed;
+ data = entry->data;
+
+ list_add(&msg_free_list, &entry->link);
+ if (list_empty(&msg_pending_list))
+ opal_update_pending_evt(OPAL_EVENT_MSG_PENDING, 0);
+
+ unlock(&opal_msg_lock);
+
+ if (callback)
+ callback(data);
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_GET_MSG, opal_get_msg, 2);
+
+static int64_t opal_check_completion(uint64_t *buffer, uint64_t size,
+ uint64_t token)
+{
+ struct opal_msg_entry *entry, *next_entry;
+ void (*callback)(void *data) = NULL;
+ int rc = OPAL_BUSY;
+ void *data = NULL;
+
+ lock(&opal_msg_lock);
+ list_for_each_safe(&msg_pending_list, entry, next_entry, link) {
+ if (entry->msg.msg_type == OPAL_MSG_ASYNC_COMP &&
+ entry->msg.params[0] == token) {
+ list_del(&entry->link);
+ callback = entry->consumed;
+ data = entry->data;
+ list_add(&msg_free_list, &entry->link);
+ if (list_empty(&msg_pending_list))
+ opal_update_pending_evt(OPAL_EVENT_MSG_PENDING,
+ 0);
+ rc = OPAL_SUCCESS;
+ break;
+ }
+ }
+
+ if (rc == OPAL_SUCCESS && size >= sizeof(struct opal_msg))
+ memcpy(buffer, &entry->msg, sizeof(entry->msg));
+
+ unlock(&opal_msg_lock);
+
+ if (callback)
+ callback(data);
+
+ return rc;
+
+}
+opal_call(OPAL_CHECK_ASYNC_COMPLETION, opal_check_completion, 3);
+
+void opal_init_msg(void)
+{
+ struct opal_msg_entry *entry;
+ int i;
+
+ for (i = 0; i < OPAL_MAX_MSGS; i++) {
+ entry = zalloc(sizeof(*entry));
+ if (!entry)
+ goto err;
+ list_add_tail(&msg_free_list, &entry->link);
+ }
+ return;
+
+err:
+ for (; i > 0; i--) {
+ entry = list_pop(&msg_free_list, struct opal_msg_entry, link);
+ if (entry)
+ free(entry);
+ }
+}
+
diff --git a/core/opal.c b/core/opal.c
new file mode 100644
index 0000000..2727fd5
--- /dev/null
+++ b/core/opal.c
@@ -0,0 +1,308 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <opal.h>
+#include <stack.h>
+#include <lock.h>
+#include <fsp.h>
+#include <cpu.h>
+#include <interrupts.h>
+#include <op-panel.h>
+#include <device.h>
+#include <console.h>
+#include <trace.h>
+#include <timebase.h>
+#include <affinity.h>
+#include <opal-msg.h>
+
+/* Pending events to signal via opal_poll_events */
+uint64_t opal_pending_events;
+
+/* OPAL dispatch table defined in head.S */
+extern uint64_t opal_branch_table[];
+
+/* Number of args expected for each call. */
+static u8 opal_num_args[OPAL_LAST+1];
+
+/* OPAL anchor node */
+struct dt_node *opal_node;
+
+extern uint32_t attn_trigger;
+extern uint32_t hir_trigger;
+
+void opal_table_init(void)
+{
+ struct opal_table_entry *s = __opal_table_start;
+ struct opal_table_entry *e = __opal_table_end;
+
+ printf("OPAL table: %p .. %p, branch table: %p\n",
+ s, e, opal_branch_table);
+ while (s < e) {
+ uint64_t *func = s->func;
+ opal_branch_table[s->token] = *func;
+ opal_num_args[s->token] = s->nargs;
+ s++;
+ }
+}
+
+/* Called from head.S, thus no prototype */
+long opal_bad_token(uint64_t token);
+
+long opal_bad_token(uint64_t token)
+{
+ prerror("OPAL: Called with bad token %lld !\n", token);
+
+ return OPAL_PARAMETER;
+}
+
+/* Called from head.S, thus no prototype */
+void opal_trace_entry(struct stack_frame *eframe);
+
+/* FIXME: Do this in asm */
+void opal_trace_entry(struct stack_frame *eframe)
+{
+ union trace t;
+ unsigned nargs;
+
+ if (this_cpu()->pir != mfspr(SPR_PIR)) {
+ printf("CPU MISMATCH ! PIR=%04lx cpu @%p -> pir=%04x\n",
+ mfspr(SPR_PIR), this_cpu(), this_cpu()->pir);
+ abort();
+ }
+ if (eframe->gpr[0] > OPAL_LAST)
+ nargs = 0;
+ else
+ nargs = opal_num_args[eframe->gpr[0]];
+
+ t.opal.token = eframe->gpr[0];
+ t.opal.lr = eframe->lr;
+ t.opal.sp = eframe->gpr[1];
+ memcpy(t.opal.r3_to_11, &eframe->gpr[3], nargs*sizeof(u64));
+
+ trace_add(&t, TRACE_OPAL, offsetof(struct trace_opal, r3_to_11[nargs]));
+}
+
+void __opal_register(uint64_t token, void *func, unsigned int nargs)
+{
+ uint64_t *opd = func;
+
+ assert(token <= OPAL_LAST);
+
+ opal_branch_table[token] = *opd;
+ opal_num_args[token] = nargs;
+}
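+
+/*
+ * Note: func is dereferenced because 64-bit function pointers here
+ * point to a function descriptor ("opd"); the branch table wants the
+ * raw entry address.  An illustrative runtime registration:
+ *
+ *	__opal_register(OPAL_TEST, opal_test_func, 1);
+ */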
+
+static void add_opal_firmware_node(void)
+{
+ struct dt_node *firmware = dt_new(opal_node, "firmware");
+
+ dt_add_property_string(firmware, "compatible", "ibm,opal-firmware");
+ dt_add_property_string(firmware, "name", "firmware");
+ dt_add_property_string(firmware, "git-id", gitid);
+}
+
+void add_opal_node(void)
+{
+ uint64_t base, entry, size;
+ extern uint32_t opal_entry;
+
+ /* XXX TODO: Reorg this. We should create the base OPAL
+ * node early on, and have the various sub modules populate
+ * their own entries (console etc...)
+ *
+ * The logic of which console backend to use should be
+ * extracted
+ */
+
+ entry = (uint64_t)&opal_entry;
+ base = SKIBOOT_BASE;
+ size = (CPU_STACKS_BASE +
+ (cpu_max_pir + 1) * STACK_SIZE) - SKIBOOT_BASE;
+
+ opal_node = dt_new(dt_root, "ibm,opal");
+ dt_add_property_cells(opal_node, "#address-cells", 0);
+ dt_add_property_cells(opal_node, "#size-cells", 0);
+ dt_add_property_strings(opal_node, "compatible", "ibm,opal-v2",
+ "ibm,opal-v3");
+ dt_add_property_cells(opal_node, "opal-msg-async-num", OPAL_MAX_ASYNC_COMP);
+ dt_add_property_cells(opal_node, "opal-msg-size", sizeof(struct opal_msg));
+ dt_add_property_u64(opal_node, "opal-base-address", base);
+ dt_add_property_u64(opal_node, "opal-entry-address", entry);
+ dt_add_property_u64(opal_node, "opal-runtime-size", size);
+
+ add_opal_firmware_node();
+ add_associativity_ref_point();
+ memcons_add_properties();
+ add_cpu_idle_state_properties();
+}
+
+void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values)
+{
+ static struct lock evt_lock = LOCK_UNLOCKED;
+ uint64_t new_evts;
+
+ /* XXX FIXME: Use atomics instead ??? Or caller locks (con_lock ?) */
+ lock(&evt_lock);
+ new_evts = (opal_pending_events & ~evt_mask) | evt_values;
+#ifdef OPAL_TRACE_EVT_CHG
+ printf("OPAL: Evt change: 0x%016llx -> 0x%016llx\n",
+ opal_pending_events, new_evts);
+#endif
+ opal_pending_events = new_evts;
+ unlock(&evt_lock);
+}
+
+
+static uint64_t opal_test_func(uint64_t arg)
+{
+ printf("OPAL: Test function called with arg 0x%llx\n", arg);
+
+ return 0xfeedf00d;
+}
+opal_call(OPAL_TEST, opal_test_func, 1);
+
+struct opal_poll_entry {
+ struct list_node link;
+ void (*poller)(void *data);
+ void *data;
+};
+
+static struct list_head opal_pollers = LIST_HEAD_INIT(opal_pollers);
+static struct lock opal_poll_lock = LOCK_UNLOCKED;
+
+void opal_add_poller(void (*poller)(void *data), void *data)
+{
+ struct opal_poll_entry *ent;
+
+ ent = zalloc(sizeof(struct opal_poll_entry));
+ assert(ent);
+ ent->poller = poller;
+ ent->data = data;
+ lock(&opal_poll_lock);
+ list_add_tail(&opal_pollers, &ent->link);
+ unlock(&opal_poll_lock);
+}
+
+void opal_del_poller(void (*poller)(void *data))
+{
+ struct opal_poll_entry *ent;
+
+ lock(&opal_poll_lock);
+ list_for_each(&opal_pollers, ent, link) {
+ if (ent->poller == poller) {
+ list_del(&ent->link);
+ free(ent);
+ break;
+ }
+ }
+ unlock(&opal_poll_lock);
+}
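+
+/*
+ * Registration sketch (illustrative; my_poller/my_dev are made-up
+ * names).  Pollers run from opal_poll_events() below, so they must
+ * not block for long:
+ *
+ *	static void my_poller(void *data) { ... }
+ *	opal_add_poller(my_poller, my_dev);
+ *	...
+ *	opal_del_poller(my_poller);
+ */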
+
+static int64_t opal_poll_events(uint64_t *outstanding_event_mask)
+{
+ struct opal_poll_entry *poll_ent;
+
+ /* Check if we need to trigger an attn for test use */
+ if (attn_trigger == 0xdeadbeef) {
+ printf("Triggering attn\n");
+ assert(false);
+ }
+
+ /* Test the host initiated reset */
+ if (hir_trigger == 0xdeadbeef) {
+ fsp_trigger_reset();
+ hir_trigger = 0;
+ }
+
+ /*
+ * Only run the pollers if they aren't already running
+ * on another CPU
+ */
+ if (try_lock(&opal_poll_lock)) {
+ list_for_each(&opal_pollers, poll_ent, link)
+ poll_ent->poller(poll_ent->data);
+ unlock(&opal_poll_lock);
+ }
+
+ if (outstanding_event_mask)
+ *outstanding_event_mask = opal_pending_events;
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_POLL_EVENTS, opal_poll_events, 1);
+
+static int64_t opal_check_token(uint64_t token)
+{
+ if (token > OPAL_LAST)
+ return OPAL_TOKEN_ABSENT;
+
+ if (opal_branch_table[token])
+ return OPAL_TOKEN_PRESENT;
+
+ return OPAL_TOKEN_ABSENT;
+}
+opal_call(OPAL_CHECK_TOKEN, opal_check_token, 1);
+
+struct opal_sync_entry {
+ struct list_node link;
+ bool (*notify)(void *data);
+ void *data;
+};
+
+static struct list_head opal_syncers = LIST_HEAD_INIT(opal_syncers);
+
+void opal_add_host_sync_notifier(bool (*notify)(void *data), void *data)
+{
+ struct opal_sync_entry *ent;
+
+ ent = zalloc(sizeof(struct opal_sync_entry));
+ assert(ent);
+ ent->notify = notify;
+ ent->data = data;
+ list_add_tail(&opal_syncers, &ent->link);
+}
+
+void opal_del_host_sync_notifier(bool (*notify)(void *data))
+{
+ struct opal_sync_entry *ent;
+
+ list_for_each(&opal_syncers, ent, link) {
+ if (ent->notify == notify) {
+ list_del(&ent->link);
+ free(ent);
+ return;
+ }
+ }
+}
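+
+/*
+ * Notifier sketch (illustrative; flush_pending is a made-up name): a
+ * backend that must drain state before the host kexecs registers
+ *
+ *	opal_add_host_sync_notifier(flush_pending, dev);
+ *
+ * and keeps returning false until the drain completes, which makes
+ * opal_sync_host_reboot() below return OPAL_BUSY_EVENT so the host
+ * retries.
+ */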
+
+/*
+ * OPAL call to handle host kexec'ing scenario
+ */
+static int64_t opal_sync_host_reboot(void)
+{
+ struct opal_sync_entry *ent;
+ bool ret = true;
+
+ list_for_each(&opal_syncers, ent, link)
+ ret &= ent->notify(ent->data);
+
+ if (ret)
+ return OPAL_SUCCESS;
+ else
+ return OPAL_BUSY_EVENT;
+}
+opal_call(OPAL_SYNC_HOST_REBOOT, opal_sync_host_reboot, 0);
diff --git a/core/pci-opal.c b/core/pci-opal.c
new file mode 100644
index 0000000..ee534cc
--- /dev/null
+++ b/core/pci-opal.c
@@ -0,0 +1,666 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <timebase.h>
+#include <lock.h>
+
+#define OPAL_PCICFG_ACCESS(op, cb, type) \
+static int64_t opal_pci_config_##op(uint64_t phb_id, \
+ uint64_t bus_dev_func, \
+ uint64_t offset, type data) \
+{ \
+ struct phb *phb = pci_get_phb(phb_id); \
+ int64_t rc; \
+ \
+ if (!phb) \
+ return OPAL_PARAMETER; \
+ phb->ops->lock(phb); \
+ rc = phb->ops->cfg_##cb(phb, bus_dev_func, offset, data); \
+ phb->ops->unlock(phb); \
+ pci_put_phb(phb); \
+ \
+ return rc; \
+}
+
+OPAL_PCICFG_ACCESS(read_byte, read8, uint8_t *)
+OPAL_PCICFG_ACCESS(read_half_word, read16, uint16_t *)
+OPAL_PCICFG_ACCESS(read_word, read32, uint32_t *)
+OPAL_PCICFG_ACCESS(write_byte, write8, uint8_t)
+OPAL_PCICFG_ACCESS(write_half_word, write16, uint16_t)
+OPAL_PCICFG_ACCESS(write_word, write32, uint32_t)
+
+opal_call(OPAL_PCI_CONFIG_READ_BYTE, opal_pci_config_read_byte, 4);
+opal_call(OPAL_PCI_CONFIG_READ_HALF_WORD, opal_pci_config_read_half_word, 4);
+opal_call(OPAL_PCI_CONFIG_READ_WORD, opal_pci_config_read_word, 4);
+opal_call(OPAL_PCI_CONFIG_WRITE_BYTE, opal_pci_config_write_byte, 4);
+opal_call(OPAL_PCI_CONFIG_WRITE_HALF_WORD, opal_pci_config_write_half_word, 4);
+opal_call(OPAL_PCI_CONFIG_WRITE_WORD, opal_pci_config_write_word, 4);
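+
+/*
+ * For reference, OPAL_PCICFG_ACCESS(read_byte, read8, uint8_t *)
+ * expands to
+ *
+ *	static int64_t opal_pci_config_read_byte(uint64_t phb_id,
+ *						 uint64_t bus_dev_func,
+ *						 uint64_t offset,
+ *						 uint8_t *data)
+ *
+ * which locks the PHB, forwards to phb->ops->cfg_read8() and unlocks.
+ */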
+
+static int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint64_t *phb_status)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->eeh_freeze_status)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->eeh_freeze_status(phb, pe_number, freeze_state,
+ pci_error_type, NULL, phb_status);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_EEH_FREEZE_STATUS, opal_pci_eeh_freeze_status, 5);
+
+static int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
+ uint64_t eeh_action_token)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->eeh_freeze_clear)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->eeh_freeze_clear(phb, pe_number, eeh_action_token);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_EEH_FREEZE_CLEAR, opal_pci_eeh_freeze_clear, 3);
+
+static int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type,
+ uint16_t window_num, uint16_t enable)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->phb_mmio_enable)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->phb_mmio_enable(phb, window_type, window_num, enable);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_PHB_MMIO_ENABLE, opal_pci_phb_mmio_enable, 4);
+
+static int64_t opal_pci_set_phb_mem_window(uint64_t phb_id,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint64_t addr,
+ uint64_t pci_addr,
+ uint64_t size)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_phb_mem_window)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_phb_mem_window(phb, window_type, window_num,
+ addr, pci_addr, size);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_PHB_MEM_WINDOW, opal_pci_set_phb_mem_window, 6);
+
+static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t segment_num)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->map_pe_mmio_window)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->map_pe_mmio_window(phb, pe_number, window_type,
+ window_num, segment_num);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_MAP_PE_MMIO_WINDOW, opal_pci_map_pe_mmio_window, 5);
+
+static int64_t opal_pci_set_phb_table_memory(uint64_t phb_id __unused,
+ uint64_t rtt_addr __unused,
+ uint64_t ivt_addr __unused,
+ uint64_t ivt_len __unused,
+ uint64_t rej_array_addr __unused,
+ uint64_t peltv_addr __unused)
+{
+ /* IODA2 (P8) stuff, TODO */
+ return OPAL_UNSUPPORTED;
+}
+opal_call(OPAL_PCI_SET_PHB_TABLE_MEMORY, opal_pci_set_phb_table_memory, 6);
+
+static int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number,
+ uint64_t bus_dev_func, uint8_t bus_compare,
+ uint8_t dev_compare, uint8_t func_compare,
+ uint8_t pe_action)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_pe)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_pe(phb, pe_number, bus_dev_func, bus_compare,
+ dev_compare, func_compare, pe_action);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_PE, opal_pci_set_pe, 7);
+
+static int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe,
+ uint32_t child_pe, uint8_t state)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_peltv)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_peltv(phb, parent_pe, child_pe, state);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_PELTV, opal_pci_set_peltv, 4);
+
+static int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number,
+ uint32_t pe_number)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_mve)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_mve(phb, mve_number, pe_number);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_MVE, opal_pci_set_mve, 3);
+
+static int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number,
+ uint32_t state)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_mve_enable)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_mve_enable(phb, mve_number, state);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_MVE_ENABLE, opal_pci_set_mve_enable, 3);
+
+static int64_t opal_pci_get_xive_reissue(uint64_t phb_id __unused,
+ uint32_t xive_number __unused,
+ uint8_t *p_bit __unused,
+ uint8_t *q_bit __unused)
+{
+ /* IODA2 (P8) stuff, TODO */
+ return OPAL_UNSUPPORTED;
+}
+opal_call(OPAL_PCI_GET_XIVE_REISSUE, opal_pci_get_xive_reissue, 4);
+
+static int64_t opal_pci_set_xive_reissue(uint64_t phb_id __unused,
+ uint32_t xive_number __unused,
+ uint8_t p_bit __unused,
+ uint8_t q_bit __unused)
+{
+ /* IODA2 (P8) stuff, TODO */
+ return OPAL_UNSUPPORTED;
+}
+opal_call(OPAL_PCI_SET_XIVE_REISSUE, opal_pci_set_xive_reissue, 4);
+
+static int64_t opal_pci_msi_eoi(uint64_t phb_id,
+ uint32_t hwirq)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->pci_msi_eoi)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->pci_msi_eoi(phb, hwirq);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_MSI_EOI, opal_pci_msi_eoi, 2);
+
+static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
+ uint32_t xive_num)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_xive_pe)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_xive_pe(phb, pe_number, xive_num);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_XIVE_PE, opal_pci_set_xive_pe, 3);
+
+static int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
+ int32_t *interrupt_source_number)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->get_xive_source)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->get_xive_source(phb, xive_num, interrupt_source_number);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_GET_XIVE_SOURCE, opal_get_xive_source, 3);
+
+static int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint32_t *msi_address, uint32_t *message_data)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->get_msi_32)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->get_msi_32(phb, mve_number, xive_num, msi_range,
+ msi_address, message_data);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_GET_MSI_32, opal_get_msi_32, 6);
+
+static int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint64_t *msi_address, uint32_t *message_data)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->get_msi_64)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->get_msi_64(phb, mve_number, xive_num, msi_range,
+ msi_address, message_data);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_GET_MSI_64, opal_get_msi_64, 6);
+
+static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number,
+ uint16_t window_id,
+ uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->map_pe_dma_window)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->map_pe_dma_window(phb, pe_number, window_id,
+ tce_levels, tce_table_addr,
+ tce_table_size, tce_page_size);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_MAP_PE_DMA_WINDOW, opal_pci_map_pe_dma_window, 7);
+
+static int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id,
+ uint16_t pe_number,
+ uint16_t window_id,
+ uint64_t pci_start_addr,
+ uint64_t pci_mem_size)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->map_pe_dma_window_real)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->map_pe_dma_window_real(phb, pe_number, window_id,
+ pci_start_addr, pci_mem_size);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_MAP_PE_DMA_WINDOW_REAL, opal_pci_map_pe_dma_window_real, 5);
+
+static int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope,
+ uint8_t assert_state)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc = OPAL_SUCCESS;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops)
+ return OPAL_UNSUPPORTED;
+ if (assert_state != OPAL_ASSERT_RESET &&
+ assert_state != OPAL_DEASSERT_RESET)
+ return OPAL_PARAMETER;
+
+ phb->ops->lock(phb);
+
+ switch (reset_scope) {
+ case OPAL_RESET_PHB_COMPLETE:
+ if (!phb->ops->complete_reset) {
+ rc = OPAL_UNSUPPORTED;
+ break;
+ }
+
+ rc = phb->ops->complete_reset(phb, assert_state);
+ if (rc < 0)
+ prerror("PHB#%d: Failure on complete reset, rc=%lld\n",
+ phb->opal_id, rc);
+ break;
+ case OPAL_RESET_PCI_FUNDAMENTAL:
+ if (!phb->ops->fundamental_reset) {
+ rc = OPAL_UNSUPPORTED;
+ break;
+ }
+
+ /* Nothing to do at deassert time */
+ if (assert_state != OPAL_ASSERT_RESET)
+ break;
+
+ rc = phb->ops->fundamental_reset(phb);
+ if (rc < 0)
+ prerror("PHB#%d: Failure on fundamental reset, rc=%lld\n",
+ phb->opal_id, rc);
+ break;
+ case OPAL_RESET_PCI_HOT:
+ if (!phb->ops->hot_reset) {
+ rc = OPAL_UNSUPPORTED;
+ break;
+ }
+
+ /* Nothing to do at deassert time */
+ if (assert_state != OPAL_ASSERT_RESET)
+ break;
+
+ rc = phb->ops->hot_reset(phb);
+ if (rc < 0)
+ prerror("PHB#%d: Failure on hot reset, rc=%lld\n",
+ phb->opal_id, rc);
+ break;
+ case OPAL_RESET_PCI_IODA_TABLE:
+ if (assert_state != OPAL_ASSERT_RESET)
+ break;
+ if (phb->ops->ioda_reset)
+ phb->ops->ioda_reset(phb, true);
+ break;
+ default:
+ rc = OPAL_UNSUPPORTED;
+ }
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return (rc > 0) ? tb_to_msecs(rc) : rc;
+}
+opal_call(OPAL_PCI_RESET, opal_pci_reset, 3);
+
+static int64_t opal_pci_reinit(uint64_t phb_id,
+ uint64_t reinit_scope,
+ uint64_t data)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops || !phb->ops->pci_reinit)
+ return OPAL_UNSUPPORTED;
+
+ phb->ops->lock(phb);
+ rc = phb->ops->pci_reinit(phb, reinit_scope, data);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_REINIT, opal_pci_reinit, 3);
+
+static int64_t opal_pci_poll(uint64_t phb_id)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops || !phb->ops->poll)
+ return OPAL_UNSUPPORTED;
+
+ phb->ops->lock(phb);
+ rc = phb->ops->poll(phb);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ /* Return milliseconds for caller to sleep: round up */
+ if (rc > 0) {
+ rc = tb_to_msecs(rc);
+ if (rc == 0)
+ rc = 1;
+ }
+
+ return rc;
+}
+opal_call(OPAL_PCI_POLL, opal_pci_poll, 1);
+
+static int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id,
+ uint64_t tce_mem_addr,
+ uint64_t tce_mem_size)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_phb_tce_memory)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->set_phb_tce_memory(phb, tce_mem_addr, tce_mem_size);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_SET_PHB_TCE_MEMORY, opal_pci_set_phb_tce_memory, 3);
+
+static int64_t opal_pci_get_phb_diag_data(uint64_t phb_id,
+ void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->get_diag_data)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->get_diag_data(phb, diag_buffer, diag_buffer_len);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_GET_PHB_DIAG_DATA, opal_pci_get_phb_diag_data, 3);
+
+static int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id,
+ void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->get_diag_data2)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->get_diag_data2(phb, diag_buffer, diag_buffer_len);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_GET_PHB_DIAG_DATA2, opal_pci_get_phb_diag_data2, 3);
+
+static int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type, uint16_t *severity)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->next_error)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+
+ /* Any call to this function clears the error event */
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR, 0);
+ rc = phb->ops->next_error(phb, first_frozen_pe, pci_error_type,
+ severity);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_NEXT_ERROR, opal_pci_next_error, 4);
+
+static int64_t opal_pci_eeh_freeze_status2(uint64_t phb_id, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint16_t *severity,
+ uint64_t *phb_status)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->eeh_freeze_status)
+ return OPAL_UNSUPPORTED;
+ phb->ops->lock(phb);
+ rc = phb->ops->eeh_freeze_status(phb, pe_number, freeze_state,
+ pci_error_type, severity, phb_status);
+ phb->ops->unlock(phb);
+ pci_put_phb(phb);
+
+ return rc;
+}
+opal_call(OPAL_PCI_EEH_FREEZE_STATUS2, opal_pci_eeh_freeze_status2, 6);
+
+static int64_t opal_pci_set_phb_capi_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number)
+{
+ struct phb *phb = pci_get_phb(phb_id);
+ int64_t rc;
+
+ if (!phb)
+ return OPAL_PARAMETER;
+ if (!phb->ops->set_capi_mode)
+ return OPAL_UNSUPPORTED;
+ if (mode == 1) {
+ phb->ops->lock(phb);
+ rc = phb->ops->set_capi_mode(phb, mode, pe_number);
+ phb->ops->unlock(phb);
+ return rc;
+ }
+ if (mode == 0) {
+ /* FIXME add support for PCI mode*/
+ }
+ return OPAL_UNSUPPORTED;
+}
+opal_call(OPAL_PCI_SET_PHB_CAPI_MODE, opal_pci_set_phb_capi_mode, 3);
diff --git a/core/pci.c b/core/pci.c
new file mode 100644
index 0000000..f07908b
--- /dev/null
+++ b/core/pci.c
@@ -0,0 +1,1388 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <timebase.h>
+#include <lock.h>
+#include <device.h>
+
+static struct lock pci_lock = LOCK_UNLOCKED;
+#define PCI_MAX_PHBs 64
+static struct phb *phbs[PCI_MAX_PHBs];
+
+#define DBG(fmt...) do { } while(0)
+
+/*
+ * Generic PCI utilities
+ */
+
+/* pci_find_cap - Find a PCI capability in a device config space
+ *
+ * This will return a config space offset (positive) or a negative
+ * error (OPAL error codes).
+ *
+ * OPAL_UNSUPPORTED is returned if the capability doesn't exist
+ */
+int64_t pci_find_cap(struct phb *phb, uint16_t bdfn, uint8_t want)
+{
+ int64_t rc;
+ uint16_t stat, cap;
+ uint8_t pos, next;
+
+ rc = pci_cfg_read16(phb, bdfn, PCI_CFG_STAT, &stat);
+ if (rc)
+ return rc;
+ if (!(stat & PCI_CFG_STAT_CAP))
+ return OPAL_UNSUPPORTED;
+ rc = pci_cfg_read8(phb, bdfn, PCI_CFG_CAP, &pos);
+ if (rc)
+ return rc;
+ pos &= 0xfc;
+ while(pos) {
+ rc = pci_cfg_read16(phb, bdfn, pos, &cap);
+ if (rc)
+ return rc;
+ if ((cap & 0xff) == want)
+ return pos;
+ next = (cap >> 8) & 0xfc;
+ if (next == pos) {
+ prerror("PHB%d: dev %04x pci_find_cap hit a loop !\n",
+ phb->opal_id, bdfn);
+ break;
+ }
+ pos = next;
+ }
+ return OPAL_UNSUPPORTED;
+}
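+
+/* Illustrative usage (a sketch, not part of this patch): locating
+ * the PCI Express capability of a device and reading a register
+ * from it:
+ *
+ *   int64_t ecap = pci_find_cap(phb, bdfn, PCI_CFG_CAP_ID_EXP);
+ *   uint16_t reg;
+ *
+ *   if (ecap > 0)
+ *           pci_cfg_read16(phb, bdfn,
+ *                          ecap + PCICAP_EXP_CAPABILITY_REG, &reg);
+ */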
+
+/* pci_find_ecap - Find a PCIe extended capability in a device
+ * config space
+ *
+ * This will return a config space offset (positive) or a negative
+ * error (OPAL error code). Additionally, if the "version" argument
+ * is non-NULL, the capability version will be returned there.
+ *
+ * OPAL_UNSUPPORTED is returned if the capability doesn't exist
+ */
+int64_t pci_find_ecap(struct phb *phb, uint16_t bdfn, uint16_t want,
+ uint8_t *version)
+{
+ int64_t rc;
+ uint32_t cap;
+ uint16_t off, prev = 0;
+
+ for (off = 0x100; off && off < 0x1000; off = (cap >> 20) & 0xffc) {
+ if (off == prev) {
+ prerror("PHB%d: dev %04x pci_find_ecap hit a loop !\n",
+ phb->opal_id, bdfn);
+ break;
+ }
+ prev = off;
+ rc = pci_cfg_read32(phb, bdfn, off, &cap);
+ if (rc)
+ return rc;
+ if ((cap & 0xffff) == want) {
+ if (version)
+ *version = (cap >> 16) & 0xf;
+ return off;
+ }
+ }
+ return OPAL_UNSUPPORTED;
+}
+
+static struct pci_device *pci_scan_one(struct phb *phb, struct pci_device *parent,
+ uint16_t bdfn)
+{
+ struct pci_device *pd = NULL;
+ uint32_t retries, vdid, val;
+ int64_t rc, ecap;
+ uint8_t htype;
+ uint16_t capreg;
+ bool had_crs = false;
+
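+ /* A device still initializing may respond with the CRS ID
+ * (0xffff0001); poll for up to ~4 seconds (40 x 100ms) before
+ * giving up.
+ */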
+ for (retries = 40; retries; retries--) {
+ rc = pci_cfg_read32(phb, bdfn, 0, &vdid);
+ if (rc)
+ return NULL;
+ if (vdid == 0xffffffff || vdid == 0x00000000)
+ return NULL;
+ if (vdid != 0xffff0001)
+ break;
+ had_crs = true;
+ time_wait_ms(100);
+ }
+ if (vdid == 0xffff0001) {
+ prerror("PCI: Device %04x CRS timeout !\n", bdfn);
+ return NULL;
+ }
+ if (had_crs)
+ printf("PCI: Device %04x replied after CRS\n", bdfn);
+ pd = zalloc(sizeof(struct pci_device));
+ if (!pd) {
+ prerror("PCI: Failed to allocate structure pci_device !\n");
+ goto fail;
+ }
+ pd->bdfn = bdfn;
+ pd->parent = parent;
+ list_head_init(&pd->children);
+ rc = pci_cfg_read8(phb, bdfn, PCI_CFG_HDR_TYPE, &htype);
+ if (rc) {
+ prerror("PCI: Failed to read header type !\n");
+ goto fail;
+ }
+ pd->is_multifunction = !!(htype & 0x80);
+ pd->is_bridge = (htype & 0x7f) != 0;
+ pd->scan_map = 0xffffffff; /* Default */
+
+ ecap = pci_find_cap(phb, bdfn, PCI_CFG_CAP_ID_EXP);
+ if (ecap > 0) {
+ pci_set_cap(pd, PCI_CFG_CAP_ID_EXP, ecap, false);
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_CAPABILITY_REG,
+ &capreg);
+ pd->dev_type = GETFIELD(PCICAP_EXP_CAP_TYPE, capreg);
+
+ /*
+ * XXX We observe a problem on some PLX switches where one
+ * of the downstream ports appears as an upstream port, we
+ * fix that up here otherwise, other code will misbehave
+ */
+ if (pd->parent && pd->dev_type == PCIE_TYPE_SWITCH_UPPORT &&
+ pd->parent->dev_type == PCIE_TYPE_SWITCH_UPPORT &&
+ vdid == 0x874810b5) {
+ prerror("PCI: Fixing up bad PLX downstream port !\n");
+ pd->dev_type = PCIE_TYPE_SWITCH_DNPORT;
+ }
+
+ /* XXX Handle ARI */
+ if (pd->dev_type == PCIE_TYPE_SWITCH_DNPORT ||
+ pd->dev_type == PCIE_TYPE_ROOT_PORT)
+ pd->scan_map = 0x1;
+
+ /* Read the MPS capability; the maximum possible size is 4096 bytes */
+ pci_cfg_read32(phb, bdfn, ecap + PCICAP_EXP_DEVCAP, &val);
+ pd->mps = (128 << GETFIELD(PCICAP_EXP_DEVCAP_MPSS, val));
+ if (pd->mps > 4096)
+ pd->mps = 4096;
+ } else {
+ pd->dev_type = PCIE_TYPE_LEGACY;
+ }
+
+ /* If it's a bridge, sanitize the bus numbers to avoid forwarding
+ *
+ * This will help when walking down those bridges later on
+ */
+ if (pd->is_bridge) {
+ pci_cfg_write8(phb, bdfn, PCI_CFG_PRIMARY_BUS, bdfn >> 8);
+ pci_cfg_write8(phb, bdfn, PCI_CFG_SECONDARY_BUS, 0);
+ pci_cfg_write8(phb, bdfn, PCI_CFG_SUBORDINATE_BUS, 0);
+ }
+
+ /* XXX Need to do some basic setup, such as MPSS, MRRS,
+ * RCB, etc...
+ */
+
+ printf("PCI: Device %04x VID:%04x DEV:%04x TYP:%d MF%s BR%s EX%s\n",
+ bdfn, vdid & 0xffff, vdid >> 16, pd->dev_type,
+ pd->is_multifunction ? "+" : "-",
+ pd->is_bridge ? "+" : "-",
+ pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false) ? "+" : "-");
+
+ /*
+ * Call PHB hook
+ */
+ if (phb->ops->device_init)
+ phb->ops->device_init(phb, pd);
+
+ return pd;
+ fail:
+ if (pd)
+ free(pd);
+ return NULL;
+}
+
+/* pci_check_clear_freeze - Probing an empty slot will result in an EEH
+ * freeze. Currently we have a single PE mapping
+ * everything (default state of our backend) so
+ * we just check and clear the state of PE#0
+ *
+ * NOTE: We currently only handle simple PE freeze, not PHB fencing
+ * (or rather our backend does)
+ */
+static void pci_check_clear_freeze(struct phb *phb)
+{
+ int64_t rc;
+ uint8_t freeze_state;
+ uint16_t pci_error_type, sev;
+
+ rc = phb->ops->eeh_freeze_status(phb, 0, &freeze_state,
+ &pci_error_type, &sev, NULL);
+ if (rc)
+ return;
+ if (freeze_state == OPAL_EEH_STOPPED_NOT_FROZEN)
+ return;
+ /* We can't handle anything worse than an ER here */
+ if (sev > OPAL_EEH_SEV_NO_ERROR &&
+ sev < OPAL_EEH_SEV_PE_ER) {
+ prerror("PCI: PHB%d fatal probe error !\n", phb->opal_id);
+ return;
+ }
+ phb->ops->eeh_freeze_clear(phb, 0, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+}
+
+/* pci_enable_bridge - Called before scanning a bridge
+ *
+ * Ensures error flags are clean, disables master abort reporting,
+ * checks that the secondary bus isn't held in reset, that the slot
+ * is enabled on PCIe, etc...
+ */
+static bool pci_enable_bridge(struct phb *phb, struct pci_device *pd)
+{
+ uint16_t bctl;
+ bool was_reset = false;
+ int64_t ecap = 0;
+
+ /* Disable master aborts, clear errors */
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_BRCTL, &bctl);
+ bctl &= ~PCI_CFG_BRCTL_MABORT_REPORT;
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_BRCTL, bctl);
+
+ /* PCI-E bridge, check the slot state */
+ if (pd->dev_type == PCIE_TYPE_ROOT_PORT ||
+ pd->dev_type == PCIE_TYPE_SWITCH_DNPORT) {
+ uint16_t slctl, slcap, slsta, lctl;
+
+ ecap = pci_cap(pd, PCI_CFG_CAP_ID_EXP, false);
+
+ /* Read the slot status & check for presence detect */
+ pci_cfg_read16(phb, pd->bdfn, ecap+PCICAP_EXP_SLOTSTAT, &slsta);
+ DBG(" slstat=%04x\n", slsta);
+ if (!(slsta & PCICAP_EXP_SLOTSTAT_PDETECTST)) {
+ printf("PCI: No card in slot\n");
+ return false;
+ }
+
+ /* Read the slot capabilities */
+ pci_cfg_read16(phb, pd->bdfn, ecap+PCICAP_EXP_SLOTCAP, &slcap);
+ DBG(" slcap=%04x\n", slcap);
+ if (!(slcap & PCICAP_EXP_SLOTCAP_PWCTRL))
+ goto power_is_on;
+
+ /* Read the slot control register, check if the slot is off */
+ pci_cfg_read16(phb, pd->bdfn, ecap+PCICAP_EXP_SLOTCTL, &slctl);
+ DBG(" slctl=%04x\n", slctl);
+ if (!(slctl & PCICAP_EXP_SLOTCTL_PWRCTLR))
+ goto power_is_on;
+
+ /* Turn power on
+ *
+ * XXX This is a "command", we should wait for it to complete
+ * etc... but just waiting 2s will do for now
+ */
+ DBG("PCI: Bridge power is off, turning on ...\n");
+ slctl &= ~PCICAP_EXP_SLOTCTL_PWRCTLR;
+ slctl |= SETFIELD(PCICAP_EXP_SLOTCTL_PWRI, 0, PCIE_INDIC_ON);
+ pci_cfg_write16(phb, pd->bdfn, ecap+PCICAP_EXP_SLOTCTL, slctl);
+
+ /* Wait a couple of seconds */
+ time_wait_ms(2000);
+
+ power_is_on:
+ /* Enable link */
+ pci_cfg_read16(phb, pd->bdfn, ecap+PCICAP_EXP_LCTL, &lctl);
+ DBG(" lctl=%04x\n", lctl);
+ lctl &= ~PCICAP_EXP_LCTL_LINK_DIS;
+ pci_cfg_write16(phb, pd->bdfn, ecap+PCICAP_EXP_LCTL, lctl);
+ }
+
+ /* Clear secondary reset */
+ if (bctl & PCI_CFG_BRCTL_SECONDARY_RESET) {
+ printf("PCI: Bridge secondary reset is on, clearing it ...\n");
+ bctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_BRCTL, bctl);
+ time_wait_ms(1000);
+ was_reset = true;
+ }
+
+ /* PCI-E bridge, wait for link */
+ if (pd->dev_type == PCIE_TYPE_ROOT_PORT ||
+ pd->dev_type == PCIE_TYPE_SWITCH_DNPORT) {
+ uint32_t lcap;
+
+ /* Read link caps */
+ pci_cfg_read32(phb, pd->bdfn, ecap+PCICAP_EXP_LCAP, &lcap);
+
+ /* Does the link capability advertise DL active reporting ?
+ *
+ * If yes, wait up to 10s for the link; if not, wait 1s unless
+ * the secondary reset above already made us wait
+ */
+ if (lcap & PCICAP_EXP_LCAP_DL_ACT_REP) {
+ uint32_t retries = 100;
+ uint16_t lstat;
+
+ printf("%016lx: waiting for link... \n", mftb());
+
+ while(retries--) {
+ pci_cfg_read16(phb, pd->bdfn,
+ ecap+PCICAP_EXP_LSTAT, &lstat);
+ if (lstat & PCICAP_EXP_LSTAT_DLLL_ACT)
+ break;
+ time_wait_ms(100);
+ }
+ printf("%016lx: end wait for link...\n", mftb());
+ if (!(lstat & PCICAP_EXP_LSTAT_DLLL_ACT)) {
+ prerror("PCI: Bridge %04x, timeout waiting"
+ " for downstream link\n", pd->bdfn);
+ return false;
+ }
+ /* Need to wait another 100ms before touching
+ * the config space
+ */
+ time_wait_ms(100);
+ } else if (!was_reset)
+ time_wait_ms(1000);
+ }
+
+ /* Clear error status */
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_STAT, 0xffff);
+
+ return true;
+}
+
+/* Clean up bridge resources */
+static void pci_cleanup_bridge(struct phb *phb, struct pci_device *pd)
+{
+ uint16_t cmd;
+
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_IO_BASE_U16, 0xffff);
+ pci_cfg_write8(phb, pd->bdfn, PCI_CFG_IO_BASE, 0xf0);
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_IO_LIMIT_U16, 0);
+ pci_cfg_write8(phb, pd->bdfn, PCI_CFG_IO_LIMIT, 0);
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_MEM_BASE, 0xfff0);
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_MEM_LIMIT, 0);
+ pci_cfg_write32(phb, pd->bdfn, PCI_CFG_PREF_MEM_BASE_U32, 0xffffffff);
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_PREF_MEM_BASE, 0xfff0);
+ pci_cfg_write32(phb, pd->bdfn, PCI_CFG_PREF_MEM_LIMIT_U32, 0);
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_PREF_MEM_LIMIT, 0);
+
+ /* Note: This is a bit fishy but since we have closed all the
+ * bridge windows above, it shouldn't be a problem. Basically
+ * we enable Memory, IO and Bus Master on the bridge because
+ * some versions of Linux will fail to do it themselves.
+ */
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_CMD, &cmd);
+ cmd |= PCI_CFG_CMD_IO_EN | PCI_CFG_CMD_MEM_EN;
+ cmd |= PCI_CFG_CMD_BUS_MASTER_EN;
+ pci_cfg_write16(phb, pd->bdfn, PCI_CFG_CMD, cmd);
+}
+
+
+/* pci_scan - Perform a recursive scan of the given bus, populating
+ * the list passed as an argument. This also
+ * performs the bus numbering, so it returns the largest
+ * bus number that was assigned.
+ *
+ * Note: Eventually this might want to access some VPD information
+ * in order to know what slots to scan and what not etc..
+ *
+ * XXX NOTE: We might want to enable ARI along the way...
+ *
+ * XXX NOTE: We might also want to setup the PCIe MPS/MRSS properly
+ * here as Linux may or may not do it
+ */
+static uint8_t pci_scan(struct phb *phb, uint8_t bus, uint8_t max_bus,
+ struct list_head *list, struct pci_device *parent,
+ bool scan_downstream)
+{
+ struct pci_device *pd = NULL;
+ uint8_t dev, fn, next_bus, max_sub, save_max;
+ uint32_t scan_map;
+
+ /* Decide what to scan */
+ scan_map = parent ? parent->scan_map : phb->scan_map;
+
+ /* Do scan */
+ for (dev = 0; dev < 32; dev++) {
+ if (!(scan_map & (1ul << dev)))
+ continue;
+
+ /* Scan the device */
+ pd = pci_scan_one(phb, parent, (bus << 8) | (dev << 3));
+ pci_check_clear_freeze(phb);
+ if (!pd)
+ continue;
+
+ /* Get slot info if any */
+ if (platform.pci_get_slot_info)
+ platform.pci_get_slot_info(phb, pd);
+
+ /* Link it up */
+ list_add_tail(list, &pd->link);
+
+ /* XXX Handle ARI */
+ if (!pd->is_multifunction)
+ continue;
+ for (fn = 1; fn < 8; fn++) {
+ pd = pci_scan_one(phb, parent,
+ ((uint16_t)bus << 8) | (dev << 3) | fn);
+ pci_check_clear_freeze(phb);
+ if (pd) {
+ if (platform.pci_get_slot_info)
+ platform.pci_get_slot_info(phb, pd);
+ list_add_tail(list, &pd->link);
+ }
+ }
+ }
+
+ /*
+ * We only scan downstream if instructed to do so by the
+ * caller. Typically we avoid the scan when we know the
+ * link is already down, as can happen at the top level
+ * root complex; skipping it avoids a long secondary timeout
+ */
+ if (!scan_downstream)
+ return bus;
+
+ next_bus = bus + 1;
+ max_sub = bus;
+ save_max = max_bus;
+
+ /* Scan down bridges */
+ list_for_each(list, pd, link) {
+ bool use_max, do_scan;
+
+ if (!pd->is_bridge)
+ continue;
+
+ /* We need to figure out a new bus number to start from.
+ *
+ * This can be tricky due to our HW constraints which differ
+ * from bridge to bridge so we are going to let the phb
+ * driver decide what to do. This can return us a maximum
+ * bus number to assign as well
+ *
+ * This function will:
+ *
+ * - Return the bus number to use as secondary for the
+ * bridge or 0 for a failure
+ *
+ * - "max_bus" will be adjusted to represent the max
+ * subordinate that can be associated with the downstream
+ * device
+ *
+ * - "use_max" will be set to true if the returned max_bus
+ * *must* be used as the subordinate bus number of that
+ * bridge (when we need to give aligned powers of two
+ * on P7IOC). If it is set to false, we just adjust the
+ * subordinate bus number based on what we probed.
+ *
+ */
+ max_bus = save_max;
+ next_bus = phb->ops->choose_bus(phb, pd, next_bus,
+ &max_bus, &use_max);
+
+ /* Configure the bridge with the returned values */
+ if (next_bus <= bus) {
+ printf("PCI: Bridge %04x, out of bus numbers !\n",
+ pd->bdfn);
+ max_bus = next_bus = 0; /* Failure case */
+ }
+ pci_cfg_write8(phb, pd->bdfn, PCI_CFG_SECONDARY_BUS, next_bus);
+ pci_cfg_write8(phb, pd->bdfn, PCI_CFG_SUBORDINATE_BUS, max_bus);
+ if (!next_bus)
+ break;
+
+ printf("PCI: Bridge %04x, bus: %02x..%02x %s scanning...\n",
+ pd->bdfn, next_bus, max_bus, use_max ? "[use max]" : "");
+
+ /* Clean up bridge resources */
+ pci_cleanup_bridge(phb, pd);
+
+ /* Configure the bridge. This will enable power to the slot
+ * if it's currently disabled, lift reset, etc...
+ *
+ * Return false if we know there's nothing behind the bridge
+ */
+ do_scan = pci_enable_bridge(phb, pd);
+
+ /* Perform recursive scan */
+ if (do_scan) {
+ max_sub = pci_scan(phb, next_bus, max_bus,
+ &pd->children, pd, true);
+ } else if (!use_max) {
+ /* XXX Empty bridge... we leave room for hotplug
+ * slots etc.. but we should be smarter at figuring
+ * out if this is actually a hotpluggable one
+ */
+ max_sub = next_bus + 4;
+ if (max_sub > max_bus)
+ max_sub = max_bus;
+ }
+
+ /* Update the max subordinate as described previously */
+ if (use_max)
+ max_sub = max_bus;
+ pci_cfg_write8(phb, pd->bdfn, PCI_CFG_SUBORDINATE_BUS, max_sub);
+ next_bus = max_sub + 1;
+ }
+
+ return max_sub;
+}
+
+static int pci_get_mps(struct phb *phb,
+ struct pci_device *pd, void *userdata)
+{
+ uint32_t *mps = (uint32_t *)userdata;
+
+ /* Only consider PCI devices that have an MPS capability */
+ if (phb && pd && pd->mps && *mps > pd->mps)
+ *mps = pd->mps;
+
+ return 0;
+}
+
+static int __pci_configure_mps(struct phb *phb,
+ struct pci_device *pd,
+ void *userdata __unused)
+{
+ uint32_t ecap, mps;
+ uint16_t val;
+
+ if (!phb || !pd)
+ return 0;
+
+ /* If the MPS isn't an acceptable value, bail immediately */
+ mps = phb->mps;
+ if (mps < 128 || mps > 4096)
+ return 1;
+
+ /* A PCIe device always has an MPS capability */
+ if (pd->mps) {
+ ecap = pci_cap(pd, PCI_CFG_CAP_ID_EXP, false);
+ mps = ilog2(mps) - 7;
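+ /* (e.g. a 256-byte MPS encodes as ilog2(256) - 7 = 1 in the
+ * device control MPS field)
+ */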
+
+ pci_cfg_read16(phb, pd->bdfn, ecap + PCICAP_EXP_DEVCTL, &val);
+ val = SETFIELD(PCICAP_EXP_DEVCTL_MPS, val, mps);
+ pci_cfg_write16(phb, pd->bdfn, ecap + PCICAP_EXP_DEVCTL, val);
+ }
+
+ return 0;
+}
+
+int32_t pci_configure_mps(struct phb *phb, struct pci_device *pd)
+{
+ return __pci_configure_mps(phb, pd, NULL);
+}
+
+/*
+ * Check the power state first. If power is already on, issue
+ * a fundamental reset. Otherwise, power the slot on, which
+ * performs the fundamental reset as part of the sequence.
+ */
+static int64_t pci_reset_phb(struct phb *phb)
+{
+ const char *desc;
+ int64_t rc;
+
+ rc = phb->ops->power_state(phb);
+ if (rc < 0) {
+ printf("PHB%d: Failed to get power state, rc=%lld\n",
+ phb->opal_id, rc);
+ return rc;
+ }
+
+ if (rc == OPAL_SHPC_POWER_ON) {
+ desc = "fundamental reset";
+ rc = phb->ops->fundamental_reset(phb);
+ } else {
+ desc = "power on";
+ rc = phb->ops->slot_power_on(phb);
+ }
+
+ if (rc < 0) {
+ /* Don't warn if it's just an empty slot */
+ if (rc != OPAL_CLOSED)
+ printf("PHB%d: Failed to %s, rc=%lld\n",
+ phb->opal_id, desc, rc);
+ return rc;
+ }
+
+ /* Wait for the internal state machine to complete */
+ while (rc > 0) {
+ time_wait(rc);
+ rc = phb->ops->poll(phb);
+ }
+ if (rc < 0)
+ printf("PHB%d: Failed to %s, rc=%lld\n",
+ phb->opal_id, desc, rc);
+
+ return rc;
+}
+
+static void pci_init_slot(struct phb *phb)
+{
+ uint32_t mps = 0xffffffff;
+ int64_t rc;
+ bool has_link;
+
+ printf("PHB%d: Init slot\n", phb->opal_id);
+
+ /*
+ * For PCI/PCI-X, we get the slot info and we also
+ * check if the PHB has anything connected to it
+ */
+ if (phb->phb_type < phb_type_pcie_v1) {
+ if (platform.pci_get_slot_info)
+ platform.pci_get_slot_info(phb, NULL);
+ rc = phb->ops->presence_detect(phb);
+ if (rc != OPAL_SHPC_DEV_PRESENT) {
+ printf("PHB%d: Slot empty\n", phb->opal_id);
+ return;
+ }
+ }
+
+ /*
+ * Power on the PHB; it should receive a fundamental
+ * reset as part of powering on. The reset state
+ * machine will then wait for the link
+ */
+ rc = pci_reset_phb(phb);
+ if (rc && rc != OPAL_CLOSED)
+ return;
+
+ /* It's up, print some things */
+ rc = phb->ops->link_state(phb);
+ if (rc < 0) {
+ printf("PHB%d: Failed to query link state, rc=%lld\n",
+ phb->opal_id, rc);
+ return;
+ }
+ has_link = rc != OPAL_SHPC_LINK_DOWN;
+
+ if (!has_link)
+ printf("PHB%d: Link down\n", phb->opal_id);
+ else if (phb->phb_type >= phb_type_pcie_v1)
+ printf("PHB%d: Link up at x%lld width\n", phb->opal_id, rc);
+
+ printf("PHB%d: Scanning (upstream%s)...\n", phb->opal_id,
+ has_link ? "+downstream" : " only");
+ pci_scan(phb, 0, 0xff, &phb->devices, NULL, has_link);
+
+ /* Configure MPS (Max Payload Size) for the PCIe domain */
+ pci_walk_dev(phb, pci_get_mps, &mps);
+ phb->mps = mps;
+ pci_walk_dev(phb, __pci_configure_mps, NULL);
+}
+
+int64_t pci_register_phb(struct phb *phb)
+{
+ int64_t rc = OPAL_SUCCESS;
+ unsigned int i;
+
+ lock(&pci_lock);
+ for (i = 0; i < PCI_MAX_PHBs; i++)
+ if (!phbs[i])
+ break;
+ if (i >= PCI_MAX_PHBs) {
+ prerror("PHB: Failed to find a free ID slot\n");
+ rc = OPAL_RESOURCE;
+ } else {
+ phbs[i] = phb;
+ phb->opal_id = i;
+ dt_add_property_cells(phb->dt_node, "ibm,opal-phbid",
+ 0, phb->opal_id);
+ printf("PCI: Registered PHB ID %d\n", i);
+ }
+ list_head_init(&phb->devices);
+ unlock(&pci_lock);
+
+ return rc;
+}
+
+int64_t pci_unregister_phb(struct phb *phb)
+{
+ /* XXX We want some kind of RCU or RWlock to make things
+ * like that happen while no OPAL callback is in progress,
+ * that way we avoid taking a lock in each of them.
+ *
+ * Right now we don't unregister so we are fine
+ */
+ lock(&pci_lock);
+ phbs[phb->opal_id] = NULL;
+ unlock(&pci_lock);
+
+ return OPAL_SUCCESS;
+}
+
+struct phb *pci_get_phb(uint64_t phb_id)
+{
+ if (phb_id >= PCI_MAX_PHBs)
+ return NULL;
+
+ /* XXX See comment in pci_unregister_phb() about locking etc... */
+ return phbs[phb_id];
+}
+
+static const char *pci_class_name(uint32_t class_code)
+{
+ uint8_t class = class_code >> 16;
+ uint8_t sub = (class_code >> 8) & 0xff;
+ uint8_t pif = class_code & 0xff;
+
+ switch(class) {
+ case 0x00:
+ switch(sub) {
+ case 0x00: return "device";
+ case 0x01: return "vga";
+ }
+ break;
+ case 0x01:
+ switch(sub) {
+ case 0x00: return "scsi";
+ case 0x01: return "ide";
+ case 0x02: return "fdc";
+ case 0x03: return "ipi";
+ case 0x04: return "raid";
+ case 0x05: return "ata";
+ case 0x06: return "sata";
+ case 0x07: return "sas";
+ default: return "mass-storage";
+ }
+ case 0x02:
+ switch(sub) {
+ case 0x00: return "ethernet";
+ case 0x01: return "token-ring";
+ case 0x02: return "fddi";
+ case 0x03: return "atm";
+ case 0x04: return "isdn";
+ case 0x05: return "worldfip";
+ case 0x06: return "picmg";
+ default: return "network";
+ }
+ case 0x03:
+ switch(sub) {
+ case 0x00: return "vga";
+ case 0x01: return "xga";
+ case 0x02: return "3d-controller";
+ default: return "display";
+ }
+ case 0x04:
+ switch(sub) {
+ case 0x00: return "video";
+ case 0x01: return "sound";
+ case 0x02: return "telephony";
+ default: return "multimedia-device";
+ }
+ case 0x05:
+ switch(sub) {
+ case 0x00: return "memory";
+ case 0x01: return "flash";
+ default: return "memory-controller";
+ }
+ case 0x06:
+ switch(sub) {
+ case 0x00: return "host";
+ case 0x01: return "isa";
+ case 0x02: return "eisa";
+ case 0x03: return "mca";
+ case 0x04: return "pci";
+ case 0x05: return "pcmcia";
+ case 0x06: return "nubus";
+ case 0x07: return "cardbus";
+ case 0x08: return "raceway";
+ case 0x09: return "semi-transparent-pci";
+ case 0x0a: return "infiniband";
+ default: return "unknown-bridge";
+ }
+ case 0x07:
+ switch(sub) {
+ case 0x00:
+ switch(pif) {
+ case 0x01: return "16450-serial";
+ case 0x02: return "16550-serial";
+ case 0x03: return "16650-serial";
+ case 0x04: return "16750-serial";
+ case 0x05: return "16850-serial";
+ case 0x06: return "16950-serial";
+ default: return "serial";
+ }
+ case 0x01:
+ switch(pif) {
+ case 0x01: return "bi-directional-parallel";
+ case 0x02: return "ecp-1.x-parallel";
+ case 0x03: return "ieee1284-controller";
+ case 0xfe: return "ieee1284-device";
+ default: return "parallel";
+ }
+ case 0x02: return "multiport-serial";
+ case 0x03:
+ switch(pif) {
+ case 0x01: return "16450-modem";
+ case 0x02: return "16550-modem";
+ case 0x03: return "16650-modem";
+ case 0x04: return "16750-modem";
+ default: return "modem";
+ }
+ case 0x04: return "gpib";
+ case 0x05: return "smart-card";
+ default: return "communication-controller";
+ }
+ case 0x08:
+ switch(sub) {
+ case 0x00:
+ switch(pif) {
+ case 0x01: return "isa-pic";
+ case 0x02: return "eisa-pic";
+ case 0x10: return "io-apic";
+ case 0x20: return "iox-apic";
+ default: return "interrupt-controller";
+ }
+ case 0x01:
+ switch(pif) {
+ case 0x01: return "isa-dma";
+ case 0x02: return "eisa-dma";
+ default: return "dma-controller";
+ }
+ case 0x02:
+ switch(pif) {
+ case 0x01: return "isa-system-timer";
+ case 0x02: return "eisa-system-timer";
+ default: return "timer";
+ }
+ case 0x03:
+ switch(pif) {
+ case 0x01: return "isa-rtc";
+ default: return "rtc";
+ }
+ case 0x04: return "hotplug-controller";
+ case 0x05: return "sd-host-controller";
+ default: return "system-peripheral";
+ }
+ case 0x09:
+ switch(sub) {
+ case 0x00: return "keyboard";
+ case 0x01: return "pen";
+ case 0x02: return "mouse";
+ case 0x03: return "scanner";
+ case 0x04: return "gameport";
+ default: return "input-controller";
+ }
+ case 0x0a:
+ switch(sub) {
+ case 0x00: return "clock";
+ default: return "docking-station";
+ }
+ case 0x0b:
+ switch(sub) {
+ case 0x00: return "386";
+ case 0x01: return "486";
+ case 0x02: return "pentium";
+ case 0x10: return "alpha";
+ case 0x20: return "powerpc";
+ case 0x30: return "mips";
+ case 0x40: return "co-processor";
+ default: return "cpu";
+ }
+ case 0x0c:
+ switch(sub) {
+ case 0x00: return "firewire";
+ case 0x01: return "access-bus";
+ case 0x02: return "ssa";
+ case 0x03:
+ switch(pif) {
+ case 0x00: return "usb-uhci";
+ case 0x10: return "usb-ohci";
+ case 0x20: return "usb-ehci";
+ case 0x30: return "usb-xhci";
+ case 0xfe: return "usb-device";
+ default: return "usb";
+ }
+ case 0x04: return "fibre-channel";
+ case 0x05: return "smb";
+ case 0x06: return "infiniband";
+ case 0x07:
+ switch(pif) {
+ case 0x00: return "impi-smic";
+ case 0x01: return "impi-kbrd";
+ case 0x02: return "impi-bltr";
+ default: return "impi";
+ }
+ case 0x08: return "secos";
+ case 0x09: return "canbus";
+ default: return "serial-bus";
+ }
+ case 0x0d:
+ switch(sub) {
+ case 0x00: return "irda";
+ case 0x01: return "consumer-ir";
+ case 0x10: return "rf-controller";
+ case 0x11: return "bluetooth";
+ case 0x12: return "broadband";
+ case 0x20: return "enet-802.11a";
+ case 0x21: return "enet-802.11b";
+ default: return "wireless-controller";
+ }
+ case 0x0e: return "intelligent-controller";
+ case 0x0f:
+ switch(sub) {
+ case 0x01: return "satellite-tv";
+ case 0x02: return "satellite-audio";
+ case 0x03: return "satellite-voice";
+ case 0x04: return "satellite-data";
+ default: return "satellite-device";
+ }
+ case 0x10:
+ switch(sub) {
+ case 0x00: return "network-encryption";
+ case 0x01: return "entertainment-encryption";
+ default: return "encryption";
+ }
+ case 0x11:
+ switch(sub) {
+ case 0x00: return "dpio";
+ case 0x01: return "counter";
+ case 0x10: return "measurement";
+ case 0x20: return "management-card";
+ default: return "data-processing";
+ }
+ }
+ return "device";
+}
+
+void pci_std_swizzle_irq_map(struct dt_node *np,
+ struct pci_device *pd,
+ struct pci_lsi_state *lstate,
+ uint8_t swizzle)
+{
+ uint32_t *map, *p;
+ int dev, irq;
+ size_t map_size;
+
+ /* Size in bytes of a target interrupt */
+ size_t isize = lstate->int_size * sizeof(uint32_t);
+
+ /* Calculate the size of a map entry:
+ *
+ * 3 cells : PCI Address
+ * 1 cell : PCI IRQ
+ * 1 cell : PIC phandle
+ * n cells : PIC irq (n = lstate->int_size)
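+ *
+ * e.g. with a 2-cell PIC interrupt specifier, an entry is
+ * 3 + 1 + 1 + 2 = 7 cells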
+ *
+ * Assumption: the PIC's #address-cells is 0
+ */
+ int esize = 3 + 1 + 1 + lstate->int_size;
+
+ /* Number of map "device" entries
+ *
+ * A PCI Express root or downstream port needs only one
+ * entry for device 0. Anything else will get a full map
+ * for all possible 32 child device numbers
+ *
+ * If we have been passed a host bridge (pd == NULL) we also
+ * do a simple per-pin map
+ */
+ int edevcount;
+
+ if (!pd || (pd->dev_type == PCIE_TYPE_ROOT_PORT ||
+ pd->dev_type == PCIE_TYPE_SWITCH_DNPORT)) {
+ edevcount = 1;
+ dt_add_property_cells(np, "interrupt-map-mask", 0, 0, 0, 7);
+ } else {
+ edevcount = 32;
+ dt_add_property_cells(np, "interrupt-map-mask",
+ 0xf800, 0, 0, 7);
+ }
+ map_size = esize * edevcount * 4 * sizeof(uint32_t);
+ map = p = zalloc(map_size);
+
+ for (dev = 0; dev < edevcount; dev++) {
+ for (irq = 0; irq < 4; irq++) {
+ /* Calculate pin */
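+ /* (e.g. INTA, irq 0, on device 2 with swizzle 0 routes to
+ * parent input 2)
+ */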
+ uint32_t new_irq = (irq + dev + swizzle) % 4;
+
+ /* PCI address portion */
+ *(p++) = dev << (8 + 3);
+ *(p++) = 0;
+ *(p++) = 0;
+
+ /* PCI interrupt portion */
+ *(p++) = irq + 1;
+
+ /* Parent phandle */
+ *(p++) = lstate->int_parent[new_irq];
+
+ /* Parent desc */
+ memcpy(p, lstate->int_val[new_irq], isize);
+ p += lstate->int_size;
+ }
+ }
+
+ dt_add_property(np, "interrupt-map", map, map_size);
+ free(map);
+}
+
+static void pci_add_slot_properties(struct phb *phb, struct pci_slot_info *info,
+ struct dt_node *np)
+{
+ char loc_code[LOC_CODE_SIZE];
+ size_t base_loc_code_len, slot_label_len;
+
+ if (phb->base_loc_code) {
+ base_loc_code_len = strlen(phb->base_loc_code);
+ slot_label_len = strlen(info->label);
+ if ((base_loc_code_len + slot_label_len + 1) < LOC_CODE_SIZE) {
+ strcpy(loc_code, phb->base_loc_code);
+ strcat(loc_code, "-");
+ strcat(loc_code, info->label);
+ dt_add_property(np, "ibm,slot-location-code",
+ loc_code, strlen(loc_code) + 1);
+ } else
+ prerror("PCI: Loc Code too long - %zu + %zu + 1\n",
+ base_loc_code_len, slot_label_len);
+ } else
+ DBG("PCI: Base Loc code not found...\n");
+
+ /* Add other slot information */
+ dt_add_property_cells(np, "ibm,slot-pluggable", info->pluggable);
+ dt_add_property_cells(np, "ibm,slot-power-ctl", info->power_ctl);
+ dt_add_property_cells(np, "ibm,slot-wired-lanes", info->wired_lanes);
+ /*dt_add_property(np, "ibm,slot-bus-clock", &pd->slot_info->bus_clock, sizeof(uint8_t));*/
+ dt_add_property_cells(np, "ibm,slot-connector-type", info->connector_type);
+ dt_add_property_cells(np, "ibm,slot-card-desc", info->card_desc);
+ dt_add_property_cells(np, "ibm,slot-card-mech", info->card_mech);
+ dt_add_property_cells(np, "ibm,slot-pwr-led-ctl", info->pwr_led_ctl);
+ dt_add_property_cells(np, "ibm,slot-attn-led-ctl", info->attn_led_ctl);
+ dt_add_property_string(np, "ibm,slot-label", info->label);
+}
+
+static void pci_add_loc_code(struct dt_node *np)
+{
+ struct dt_node *p = np->parent;
+ const char *blcode = NULL;
+
+ /* Look for a parent with a slot-location-code */
+ while (p && !blcode) {
+ blcode = dt_prop_get_def(p, "ibm,slot-location-code", NULL);
+ p = p->parent;
+ }
+ if (!blcode)
+ return;
+ dt_add_property_string(np, "ibm,loc-code", blcode);
+}
+
+static void pci_print_summary_line(struct phb *phb, struct pci_device *pd,
+ struct dt_node *np, u32 rev_class,
+ const char *cname)
+{
+ const char *label, *dtype, *s;
+ u32 vdid;
+#define MAX_SLOTSTR 32
+ char slotstr[MAX_SLOTSTR + 1] = { 0, };
+
+ pci_cfg_read32(phb, pd->bdfn, 0, &vdid);
+
+ /* If it's a slot, it has a slot-label */
+ label = dt_prop_get_def(np, "ibm,slot-label", NULL);
+ if (label) {
+ u32 lanes = dt_prop_get_u32_def(np, "ibm,slot-wired-lanes", 0);
+ static const char *lanestrs[] = {
+ "", " x1", " x2", " x4", " x8", "x16", "x32", "32b", "64b"
+ };
+ const char *lstr = lanes > PCI_SLOT_WIRED_LANES_PCIX_64 ? "" : lanestrs[lanes];
+ snprintf(slotstr, MAX_SLOTSTR, "SLOT=%3s %s", label, lstr);
+ /* XXX Add more slot info */
+ } else {
+ /*
+ * No label: ignore downstream switch legs and root complexes;
+ * those would essentially be non-populated
+ */
+ if (pd->dev_type != PCIE_TYPE_ROOT_PORT &&
+ pd->dev_type != PCIE_TYPE_SWITCH_DNPORT) {
+ /* It's a mere device, get loc code */
+ s = dt_prop_get_def(np, "ibm,loc-code", NULL);
+ if (s)
+ snprintf(slotstr, MAX_SLOTSTR, "LOC_CODE=%s", s);
+ }
+ }
+
+ if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false)) {
+ static const char *pcie_types[] = {
+ "EP ", "LGCY", "????", "????", "ROOT", "SWUP", "SWDN",
+ "ETOX", "XTOE", "RINT", "EVTC" };
+ if (pd->dev_type >= ARRAY_SIZE(pcie_types))
+ dtype = "????";
+ else
+ dtype = pcie_types[pd->dev_type];
+ } else
+ dtype = pd->is_bridge ? "PCIB" : "PCID";
+
+ if (pd->is_bridge) {
+ uint8_t sec_bus, sub_bus;
+ pci_cfg_read8(phb, pd->bdfn, PCI_CFG_SECONDARY_BUS, &sec_bus);
+ pci_cfg_read8(phb, pd->bdfn, PCI_CFG_SUBORDINATE_BUS, &sub_bus);
+ printf(" %04x:%02x:%02x.%x [%s] %04x %04x R:%02x C:%06x B:%02x..%02x %s\n",
+ phb->opal_id, pd->bdfn >> 8, (pd->bdfn >> 3) & 0x1f,
+ pd->bdfn & 0x7, dtype, vdid & 0xffff, vdid >> 16,
+ rev_class & 0xff, rev_class >> 8, sec_bus, sub_bus, slotstr);
+ } else
+ printf(" %04x:%02x:%02x.%x [%s] %04x %04x R:%02x C:%06x (%14s) %s\n",
+ phb->opal_id, pd->bdfn >> 8, (pd->bdfn >> 3) & 0x1f,
+ pd->bdfn & 0x7, dtype, vdid & 0xffff, vdid >> 16,
+ rev_class & 0xff, rev_class >> 8, cname, slotstr);
+}
+
+
+static void pci_add_one_node(struct phb *phb, struct pci_device *pd,
+ struct dt_node *parent_node,
+ struct pci_lsi_state *lstate, uint8_t swizzle)
+{
+ struct pci_device *child;
+ struct dt_node *np;
+ const char *cname;
+#define MAX_NAME 256
+ char name[MAX_NAME];
+ char compat[MAX_NAME];
+ uint32_t rev_class, vdid;
+ uint32_t reg[5];
+ uint8_t intpin;
+
+ pci_cfg_read32(phb, pd->bdfn, 0, &vdid);
+ pci_cfg_read32(phb, pd->bdfn, PCI_CFG_REV_ID, &rev_class);
+ pci_cfg_read8(phb, pd->bdfn, PCI_CFG_INT_PIN, &intpin);
+
+ /*
+ * Quirk for IBM bridge bogus class on PCIe root complex.
+ * Without it, the PCI DN won't be created for its downstream
+ * devices in Linux.
+ */
+ if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false) &&
+ parent_node == phb->dt_node)
+ rev_class = (rev_class & 0xff) | 0x6040000;
+ cname = pci_class_name(rev_class >> 8);
+
+ if (pd->bdfn & 0x7)
+ snprintf(name, MAX_NAME - 1, "%s@%x,%x",
+ cname, (pd->bdfn >> 3) & 0x1f, pd->bdfn & 0x7);
+ else
+ snprintf(name, MAX_NAME - 1, "%s@%x",
+ cname, (pd->bdfn >> 3) & 0x1f);
+ np = dt_new(parent_node, name);
+
+ /* XXX FIXME: make proper "compatible" properties */
+ if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false)) {
+ snprintf(compat, MAX_NAME, "pciex%x,%x",
+ vdid & 0xffff, vdid >> 16);
+ dt_add_property_cells(np, "ibm,pci-config-space-type", 1);
+ } else {
+ snprintf(compat, MAX_NAME, "pci%x,%x",
+ vdid & 0xffff, vdid >> 16);
+ dt_add_property_cells(np, "ibm,pci-config-space-type", 0);
+ }
+ dt_add_property_cells(np, "class-code", rev_class >> 8);
+ dt_add_property_cells(np, "revision-id", rev_class & 0xff);
+ dt_add_property_cells(np, "vendor-id", vdid & 0xffff);
+ dt_add_property_cells(np, "device-id", vdid >> 16);
+ if (intpin)
+ dt_add_property_cells(np, "interrupts", intpin);
+
+ /* XXX FIXME: Add a few missing ones such as
+ *
+ * - devsel-speed (!express)
+ * - max-latency
+ * - min-grant
+ * - subsystem-id
+ * - subsystem-vendor-id
+ * - ...
+ */
+
+ /* Add slot properties if needed */
+ if (pd->slot_info)
+ pci_add_slot_properties(phb, pd->slot_info, np);
+
+ /* Make up location code */
+ pci_add_loc_code(np);
+
+ /* XXX FIXME: We don't look for BARs, we only put the config space
+ * entry in the "reg" property. That's enough for Linux and we might
+ * even want to make this legit in future ePAPR
+ */
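+ /* Per the Open Firmware PCI binding, phys.hi of "reg" encodes
+ * the config address as 0b00000000_bbbbbbbb_dddddfff_00000000,
+ * i.e. bdfn << 8 for a plain config space entry.
+ */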
+ reg[0] = pd->bdfn << 8;
+ reg[1] = reg[2] = reg[3] = reg[4] = 0;
+ dt_add_property(np, "reg", reg, sizeof(reg));
+
+ /* Print summary info about the device */
+ pci_print_summary_line(phb, pd, np, rev_class, cname);
+
+ if (!pd->is_bridge)
+ return;
+
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+
+ /* We want "device_type" for bridges */
+ if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false))
+ dt_add_property_string(np, "device_type", "pciex");
+ else
+ dt_add_property_string(np, "device_type", "pci");
+
+ /* Update the current interrupt swizzling level based on our own
+ * device number
+ */
+ swizzle = (swizzle + ((pd->bdfn >> 3) & 0x1f)) & 3;
+
+ /* We generate a standard-swizzling interrupt map. This is pretty
+ * big, we *could* try to be smarter for things that aren't hotplug
+ * slots at least and only populate those entries for which there's
+ * an actual child (especially on PCI Express), but for now that
+ * will do
+ */
+ pci_std_swizzle_irq_map(np, pd, lstate, swizzle);
+
+ /* We do an empty ranges property for now; we haven't set up any
+ * bridge windows, the kernel will deal with that
+ *
+ * XXX The kernel should probably fix that up
+ */
+ dt_add_property(np, "ranges", NULL, 0);
+
+ list_for_each(&pd->children, child, link)
+ pci_add_one_node(phb, child, np, lstate, swizzle);
+}
+
+static void pci_add_nodes(struct phb *phb)
+{
+ struct pci_lsi_state *lstate = &phb->lstate;
+ struct pci_device *pd;
+
+ /* If the PHB has its own slot info, add them */
+ if (phb->slot_info)
+ pci_add_slot_properties(phb, phb->slot_info, NULL);
+
+ /* Add all child devices */
+ list_for_each(&phb->devices, pd, link)
+ pci_add_one_node(phb, pd, phb->dt_node, lstate, 0);
+}
+
+static void __pci_reset(struct list_head *list)
+{
+ struct pci_device *pd;
+
+ while ((pd = list_pop(list, struct pci_device, link)) != NULL) {
+ __pci_reset(&pd->children);
+ free(pd);
+ }
+}
+
+void pci_reset(void)
+{
+ unsigned int i;
+
+ printf("PCI: Clearing all devices...\n");
+
+ lock(&pci_lock);
+
+ /* XXX Do those in parallel (at least the power up
+ * state machine could be done in parallel)
+ */
+ for (i = 0; i < PCI_MAX_PHBs; i++) {
+ if (!phbs[i])
+ continue;
+ __pci_reset(&phbs[i]->devices);
+ }
+ unlock(&pci_lock);
+}
+
+void pci_init_slots(void)
+{
+ unsigned int i;
+
+ printf("PCI: Probing PHB slots...\n");
+
+ lock(&pci_lock);
+
+ /* XXX Do those in parallel (at least the power up
+ * state machine could be done in parallel)
+ */
+ for (i = 0; i < PCI_MAX_PHBs; i++) {
+ if (!phbs[i])
+ continue;
+ pci_init_slot(phbs[i]);
+ }
+
+ if (platform.pci_probe_complete)
+ platform.pci_probe_complete();
+
+ printf("PCI: Summary\n");
+ for (i = 0; i < PCI_MAX_PHBs; i++) {
+ if (!phbs[i])
+ continue;
+ pci_add_nodes(phbs[i]);
+ }
+ unlock(&pci_lock);
+}
+
+static struct pci_device *__pci_walk_dev(struct phb *phb,
+ struct list_head *l,
+ int (*cb)(struct phb *,
+ struct pci_device *,
+ void *),
+ void *userdata)
+{
+ struct pci_device *pd, *child;
+
+ if (list_empty(l))
+ return NULL;
+
+ list_for_each(l, pd, link) {
+ if (cb && cb(phb, pd, userdata))
+ return pd;
+
+ child = __pci_walk_dev(phb, &pd->children, cb, userdata);
+ if (child)
+ return child;
+ }
+
+ return NULL;
+}
+
+struct pci_device *pci_walk_dev(struct phb *phb,
+ int (*cb)(struct phb *,
+ struct pci_device *,
+ void *),
+ void *userdata)
+{
+ return __pci_walk_dev(phb, &phb->devices, cb, userdata);
+}
+
+static int __pci_find_dev(struct phb *phb,
+ struct pci_device *pd, void *userdata)
+{
+ uint16_t bdfn = *((uint16_t *)userdata);
+
+ if (!phb || !pd)
+ return 0;
+
+ if (pd->bdfn == bdfn)
+ return 1;
+
+ return 0;
+}
+
+struct pci_device *pci_find_dev(struct phb *phb, uint16_t bdfn)
+{
+ return pci_walk_dev(phb, __pci_find_dev, &bdfn);
+}
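+
+/* Illustrative usage (a sketch, not part of this patch): a callback
+ * that returns 0 visits every device, so a walk can, for instance,
+ * count them:
+ *
+ *   static int count_one(struct phb *phb __unused,
+ *                        struct pci_device *pd __unused, void *data)
+ *   {
+ *           (*(uint32_t *)data)++;
+ *           return 0;
+ *   }
+ *
+ *   uint32_t count = 0;
+ *   pci_walk_dev(phb, count_one, &count);
+ */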
diff --git a/core/platform.c b/core/platform.c
new file mode 100644
index 0000000..e54b334
--- /dev/null
+++ b/core/platform.c
@@ -0,0 +1,78 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <opal.h>
+#include <console.h>
+
+/*
+ * Various wrappers for platform functions
+ */
+static int64_t opal_cec_power_down(uint64_t request)
+{
+ printf("OPAL: Shutdown request type 0x%llx...\n", request);
+
+ if (platform.cec_power_down)
+ return platform.cec_power_down(request);
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_CEC_POWER_DOWN, opal_cec_power_down, 1);
+
+static int64_t opal_cec_reboot(void)
+{
+ printf("OPAL: Reboot request...\n");
+
+#ifdef ENABLE_FAST_RESET
+ /* Try a fast reset first */
+ fast_reset();
+#endif
+ if (platform.cec_reboot)
+ return platform.cec_reboot();
+
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_CEC_REBOOT, opal_cec_reboot, 0);
+
+static void generic_platform_init(void)
+{
+ /* Do we want to unconditionally enable it ? */
+ if (dummy_console_enabled())
+ dummy_console_add_nodes();
+}
+
+static struct platform generic_platform = {
+ .name = "generic",
+ .init = generic_platform_init,
+};
+
+void probe_platform(void)
+{
+ struct platform *platforms = &__platforms_start;
+ unsigned int i;
+
+ platform = generic_platform;
+
+ for (i = 0; &platforms[i] < &__platforms_end; i++) {
+ if (platforms[i].probe && platforms[i].probe()) {
+ platform = platforms[i];
+ break;
+ }
+ }
+
+ printf("PLAT: Detected %s platform\n", platform.name);
+}
diff --git a/core/relocate.c b/core/relocate.c
new file mode 100644
index 0000000..f6bda37
--- /dev/null
+++ b/core/relocate.c
@@ -0,0 +1,65 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdbool.h>
+#include <elf.h>
+
+/* WARNING: This code is used to self-relocate, it cannot have any
+ * global reference nor TOC reference. It's also called before BSS
+ * is cleared.
+ */
+
+/* Called from head.S, thus no header. */
+int relocate(uint64_t offset, struct elf64_dyn *dyn, struct elf64_rela *rela);
+
+/* Note: This code is simplified under the assumptions that our
+ * link address is 0 and that we are already running at the
+ * target address.
+ */
+int relocate(uint64_t offset, struct elf64_dyn *dyn, struct elf64_rela *rela)
+{
+ uint64_t dt_rela = 0;
+ uint64_t dt_relacount = 0;
+ unsigned int i;
+
+ /* Look for relocation table */
+ for (; dyn->d_tag != DT_NULL; dyn++) {
+ if (dyn->d_tag == DT_RELA)
+ dt_rela = dyn->d_val;
+ else if (dyn->d_tag == DT_RELACOUNT)
+ dt_relacount = dyn->d_val;
+ }
+
+ /* If we miss either rela or relacount, bail */
+ if (!dt_rela || !dt_relacount)
+ return false;
+
+ /* Check if the offset is consistent */
+ if ((offset + dt_rela) != (uint64_t)rela)
+ return false;
+
+ /* Perform relocations */
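+ /* (e.g. an R_PPC64_RELATIVE entry with r_offset 0x1000 and
+ * r_addend 0x2000, relocated by offset 0x30000000, patches the
+ * doubleword at 0x30001000 to hold 0x30002000)
+ */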
+ for (i = 0; i < dt_relacount; i++, rela++) {
+ uint64_t *t;
+
+ if (ELF64_R_TYPE(rela->r_info) != R_PPC64_RELATIVE)
+ return false;
+ t = (uint64_t *)(rela->r_offset + offset);
+ *t = rela->r_addend + offset;
+ }
+
+ return true;
+}
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
new file mode 100644
index 0000000..37dac46
--- /dev/null
+++ b/core/test/Makefile.check
@@ -0,0 +1,29 @@
+# -*-Makefile-*-
+CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg
+
+check: $(CORE_TEST:%=%-check)
+
+$(CORE_TEST:%=%-check) : %-check: %
+ $(VALGRIND) $<
+
+core/test/stubs.o: core/test/stubs.c
+ $(HOSTCC) $(HOSTCFLAGS) -g -c -o $@ $<
+
+$(CORE_TEST) : core/test/stubs.o
+
+$(CORE_TEST) : % : %.c
+ $(HOSTCC) $(HOSTCFLAGS) -O0 -g -I include -I . -I libfdt -o $@ $< core/test/stubs.o
+
+$(CORE_TEST): % : %.d
+
+core/test/stubs.o: core/test/stubs.d
+
+core/test/%.d: core/test/%.c
+ $(HOSTCC) $(HOSTCFLAGS) -I include -I . -I libfdt -M $< > $@
+
+-include core/test/*.d
+
+clean: core-test-clean
+
+core-test-clean:
+ $(RM) -f core/test/*.[od] $(CORE_TEST)
diff --git a/core/test/run-device.c b/core/test/run-device.c
new file mode 100644
index 0000000..fa9e951
--- /dev/null
+++ b/core/test/run-device.c
@@ -0,0 +1,118 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+
+/* Override this for testing. */
+#define is_rodata(p) fake_is_rodata(p)
+
+char __rodata_start[16];
+#define __rodata_end (__rodata_start + sizeof(__rodata_start))
+
+static inline bool fake_is_rodata(const void *p)
+{
+ return ((char *)p >= __rodata_start && (char *)p < __rodata_end);
+}
+
+#define zalloc(bytes) calloc((bytes), 1)
+
+#include "../device.c"
+#include "../../ccan/list/list.c" /* For list_check */
+#include <assert.h>
+
+int main(void)
+{
+ struct dt_node *root, *c1, *c2, *gc1, *gc2, *gc3, *ggc1, *i;
+ const struct dt_property *p;
+ struct dt_property *p2;
+ unsigned int n;
+
+ root = dt_new_root("root");
+ assert(!list_top(&root->properties, struct dt_property, list));
+ c1 = dt_new(root, "c1");
+ assert(!list_top(&c1->properties, struct dt_property, list));
+ c2 = dt_new(root, "c2");
+ assert(!list_top(&c2->properties, struct dt_property, list));
+ gc1 = dt_new(c1, "gc1");
+ assert(!list_top(&gc1->properties, struct dt_property, list));
+ gc2 = dt_new(c1, "gc2");
+ assert(!list_top(&gc2->properties, struct dt_property, list));
+ gc3 = dt_new(c1, "gc3");
+ assert(!list_top(&gc3->properties, struct dt_property, list));
+ ggc1 = dt_new(gc1, "ggc1");
+ assert(!list_top(&ggc1->properties, struct dt_property, list));
+
+ for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
+ assert(!list_top(&i->properties, struct dt_property, list));
+ dt_add_property_cells(i, "visited", 1);
+ }
+ assert(n == 6);
+
+ for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
+ p = list_top(&i->properties, struct dt_property, list);
+ assert(strcmp(p->name, "visited") == 0);
+ assert(p->len == sizeof(u32));
+ assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
+ }
+ assert(n == 6);
+
+ dt_add_property_cells(c1, "some-property", 1, 2, 3);
+ p = dt_find_property(c1, "some-property");
+ assert(p);
+ assert(strcmp(p->name, "some-property") == 0);
+ assert(p->len == sizeof(u32) * 3);
+ assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
+ assert(fdt32_to_cpu(*((u32 *)p->prop + 1)) == 2);
+ assert(fdt32_to_cpu(*((u32 *)p->prop + 2)) == 3);
+
+ /* Test freeing a single node */
+ assert(!list_empty(&gc1->children));
+ dt_free(ggc1);
+ assert(list_empty(&gc1->children));
+
+ /* Test rodata logic. */
+ assert(!is_rodata("hello"));
+ assert(is_rodata(__rodata_start));
+ strcpy(__rodata_start, "name");
+ ggc1 = dt_new(root, __rodata_start);
+ assert(ggc1->name == __rodata_start);
+
+ /* Test string node. */
+ dt_add_property_string(ggc1, "somestring", "someval");
+ assert(dt_has_node_property(ggc1, "somestring", "someval"));
+ assert(!dt_has_node_property(ggc1, "somestrin", "someval"));
+ assert(!dt_has_node_property(ggc1, "somestring", "someva"));
+ assert(!dt_has_node_property(ggc1, "somestring", "somevale"));
+
+ /* Test resizing property. */
+ p = p2 = __dt_find_property(c1, "some-property");
+ assert(p);
+ n = p2->len;
+ while (p2 == p) {
+ n *= 2;
+ dt_resize_property(&p2, n);
+ }
+
+ assert(dt_find_property(c1, "some-property") == p2);
+ list_check(&c1->properties, "properties after resizing");
+
+ dt_del_property(c1, p2);
+ list_check(&c1->properties, "properties after delete");
+
+ /* No leaks for valgrind! */
+ dt_free(root);
+ return 0;
+}
diff --git a/core/test/run-malloc-speed.c b/core/test/run-malloc-speed.c
new file mode 100644
index 0000000..edc7589
--- /dev/null
+++ b/core/test/run-malloc-speed.c
@@ -0,0 +1,94 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+/* Use these before we undefine them below. */
+static inline void *real_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void real_free(void *p)
+{
+ return free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../malloc.c"
+#include "../mem_region.c"
+#include "../device.c"
+
+#undef malloc
+#undef free
+#undef realloc
+
+#include <assert.h>
+#include <stdio.h>
+
+char __rodata_start[1], __rodata_end[1];
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+#define TEST_HEAP_ORDER 27
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+#define NUM_ALLOCS 4096
+
+int main(void)
+{
+ uint64_t i, len;
+ void *p[NUM_ALLOCS];
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)real_malloc(skiboot_heap.len);
+
+ len = skiboot_heap.len / NUM_ALLOCS - sizeof(struct alloc_hdr);
+ for (i = 0; i < NUM_ALLOCS; i++) {
+ p[i] = __malloc(len, __location__);
+ assert(p[i] > region_start(&skiboot_heap));
+ assert(p[i] + len <= region_start(&skiboot_heap)
+ + skiboot_heap.len);
+ }
+ assert(mem_check(&skiboot_heap));
+ assert(mem_region_lock.lock_val == 0);
+ free(region_start(&skiboot_heap));
+ return 0;
+}
diff --git a/core/test/run-malloc.c b/core/test/run-malloc.c
new file mode 100644
index 0000000..226ce75
--- /dev/null
+++ b/core/test/run-malloc.c
@@ -0,0 +1,144 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <skiboot.h>
+
+#define is_rodata(p) true
+
+#include "../mem_region.c"
+#include "../malloc.c"
+#include "../device.c"
+
+#include "mem_region-malloc.h"
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+static bool heap_empty(void)
+{
+ const struct alloc_hdr *h = region_start(&skiboot_heap);
+ return h->num_longs == skiboot_heap.len / sizeof(long);
+}
+
+int main(void)
+{
+ char test_heap[TEST_HEAP_SIZE], *p, *p2, *p3, *p4;
+ size_t i;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)test_heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+
+ /* Allocations of various sizes. */
+ for (i = 0; i < TEST_HEAP_ORDER; i++) {
+ p = malloc(1ULL << i);
+ assert(p);
+ assert(p > (char *)test_heap);
+ assert(p + (1ULL << i) <= (char *)test_heap + TEST_HEAP_SIZE);
+ assert(!mem_region_lock.lock_val);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+ }
+
+ /* Realloc as malloc. */
+ mem_region_lock.lock_val = 0;
+ p = realloc(NULL, 100);
+ assert(p);
+ assert(!mem_region_lock.lock_val);
+
+ /* Realloc as free. */
+ p = realloc(p, 0);
+ assert(!p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc longer. */
+ p = realloc(NULL, 100);
+ assert(p);
+ assert(!mem_region_lock.lock_val);
+ p2 = realloc(p, 200);
+ assert(p2 == p);
+ assert(!mem_region_lock.lock_val);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc shorter. */
+ mem_region_lock.lock_val = 0;
+ p = realloc(NULL, 100);
+ assert(!mem_region_lock.lock_val);
+ assert(p);
+ p2 = realloc(p, 1);
+ assert(!mem_region_lock.lock_val);
+ assert(p2 == p);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc with move. */
+ p2 = malloc(TEST_HEAP_SIZE - 64 - sizeof(struct alloc_hdr)*2);
+ assert(p2);
+ p = malloc(64);
+ assert(p);
+ free(p2);
+
+ p2 = realloc(p, 128);
+ assert(p2 != p);
+ free(p2);
+ assert(heap_empty());
+ assert(!mem_region_lock.lock_val);
+
+ /* Reproduce bug BZ109128/SW257364 */
+ p = malloc(100);
+ p2 = malloc(100);
+ p3 = malloc(100);
+ p4 = malloc(100);
+ free(p2);
+ realloc(p, 216);
+ free(p3);
+ free(p);
+ free(p4);
+ assert(heap_empty());
+ assert(!mem_region_lock.lock_val);
+
+ return 0;
+}
diff --git a/core/test/run-mem_region.c b/core/test/run-mem_region.c
new file mode 100644
index 0000000..f0ad2c2
--- /dev/null
+++ b/core/test/run-mem_region.c
@@ -0,0 +1,250 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Use these before we override definitions below. */
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ return free(p);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ void *ptr = malloc(size);
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+#include <skiboot.h>
+
+#define is_rodata(p) true
+
+#include "../mem_region.c"
+#include "../device.c"
+
+#include <assert.h>
+#include <stdio.h>
+
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static bool heap_empty(void)
+{
+ const struct alloc_hdr *h = region_start(&skiboot_heap);
+ return h->num_longs == skiboot_heap.len / sizeof(long);
+}
+
+int main(void)
+{
+ char *test_heap;
+ void *p, *ptrs[100];
+ size_t i;
+ struct mem_region *r;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ test_heap = __malloc(TEST_HEAP_SIZE, __location__);
+ skiboot_heap.start = (unsigned long)test_heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+
+ /* Allocations of various sizes. */
+ for (i = 0; i < TEST_HEAP_ORDER; i++) {
+ p = mem_alloc(&skiboot_heap, 1ULL << i, 1, "here");
+ assert(p);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "here"));
+ assert(p > (void *)test_heap);
+ assert(p + (1ULL << i) <= (void *)test_heap + TEST_HEAP_SIZE);
+ assert(mem_size(&skiboot_heap, p) >= 1ULL << i);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "freed"));
+ }
+ p = mem_alloc(&skiboot_heap, 1ULL << i, 1, "here");
+ assert(!p);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Allocations of various alignments: use small alloc first. */
+ ptrs[0] = mem_alloc(&skiboot_heap, 1, 1, "small");
+ for (i = 0; ; i++) {
+ p = mem_alloc(&skiboot_heap, 1, 1ULL << i, "here");
+ assert(mem_check(&skiboot_heap));
+ /* We will eventually fail... */
+ if (!p) {
+ assert(i >= TEST_HEAP_ORDER);
+ break;
+ }
+ assert(p);
+ assert((long)p % (1ULL << i) == 0);
+ assert(p > (void *)test_heap);
+ assert(p + 1 <= (void *)test_heap + TEST_HEAP_SIZE);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(mem_check(&skiboot_heap));
+ }
+ mem_free(&skiboot_heap, ptrs[0], "small freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Many little allocations, freed in reverse order. */
+ for (i = 0; i < 100; i++) {
+ ptrs[i] = mem_alloc(&skiboot_heap, sizeof(long), 1, "here");
+ assert(ptrs[i]);
+ assert(ptrs[i] > (void *)test_heap);
+ assert(ptrs[i] + sizeof(long)
+ <= (void *)test_heap + TEST_HEAP_SIZE);
+ assert(mem_check(&skiboot_heap));
+ }
+ for (i = 0; i < 100; i++)
+ mem_free(&skiboot_heap, ptrs[100 - 1 - i], "freed");
+
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Check the prev_free gets updated properly. */
+ ptrs[0] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[0]");
+ ptrs[1] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[1]");
+ assert(ptrs[1] > ptrs[0]);
+ mem_free(&skiboot_heap, ptrs[0], "ptrs[0] free");
+ assert(mem_check(&skiboot_heap));
+ ptrs[0] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[0] again");
+ assert(mem_check(&skiboot_heap));
+ mem_free(&skiboot_heap, ptrs[1], "ptrs[1] free");
+ mem_free(&skiboot_heap, ptrs[0], "ptrs[0] free");
+ assert(mem_check(&skiboot_heap));
+ assert(heap_empty());
+
+#if 0
+ printf("Heap map:\n");
+ for (i = 0; i < TEST_HEAP_SIZE / sizeof(long); i++) {
+ printf("%u", test_bit(skiboot_heap.bitmap, i));
+ if (i % 64 == 63)
+ printf("\n");
+ else if (i % 8 == 7)
+ printf(" ");
+ }
+#endif
+
+ /* Simple enlargement, then free */
+ p = mem_alloc(&skiboot_heap, 1, 1, "one byte");
+ assert(p);
+ assert(mem_resize(&skiboot_heap, p, 100, "hundred bytes"));
+ assert(mem_size(&skiboot_heap, p) >= 100);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "hundred bytes"));
+ mem_free(&skiboot_heap, p, "freed");
+
+ /* Simple shrink, then free */
+ p = mem_alloc(&skiboot_heap, 100, 1, "100 bytes");
+ assert(p);
+ assert(mem_resize(&skiboot_heap, p, 1, "1 byte"));
+ assert(mem_size(&skiboot_heap, p) < 100);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "1 byte"));
+ mem_free(&skiboot_heap, p, "freed");
+
+ /* Lots of resizing (enlarge). */
+ p = mem_alloc(&skiboot_heap, 1, 1, "one byte");
+ assert(p);
+ for (i = 1; i <= TEST_HEAP_SIZE - sizeof(struct alloc_hdr); i++) {
+ assert(mem_resize(&skiboot_heap, p, i, "enlarge"));
+ assert(mem_size(&skiboot_heap, p) >= i);
+ assert(mem_check(&skiboot_heap));
+ }
+
+ /* Can't make it larger though. */
+ assert(!mem_resize(&skiboot_heap, p, i, "enlarge"));
+
+ for (i = TEST_HEAP_SIZE - sizeof(struct alloc_hdr); i > 0; i--) {
+ assert(mem_resize(&skiboot_heap, p, i, "shrink"));
+ assert(mem_check(&skiboot_heap));
+ }
+
+ mem_free(&skiboot_heap, p, "freed");
+ assert(mem_check(&skiboot_heap));
+
+ /* Test splitting of a region. */
+ r = new_region("base", (unsigned long)test_heap,
+ TEST_HEAP_SIZE, NULL, REGION_SKIBOOT_HEAP);
+ assert(add_region(r));
+ r = new_region("splitter", (unsigned long)test_heap + TEST_HEAP_SIZE/4,
+ TEST_HEAP_SIZE/2, NULL, REGION_RESERVED);
+ assert(add_region(r));
+ /* Now we should have *three* regions. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ if (region_start(r) == test_heap) {
+ assert(r->len == TEST_HEAP_SIZE/4);
+ assert(strcmp(r->name, "base") == 0);
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ } else if (region_start(r) == test_heap + TEST_HEAP_SIZE / 4) {
+ assert(r->len == TEST_HEAP_SIZE/2);
+ assert(strcmp(r->name, "splitter") == 0);
+ assert(r->type == REGION_RESERVED);
+ assert(!r->free_list.n.next);
+ } else if (region_start(r) == test_heap + TEST_HEAP_SIZE/4*3) {
+ assert(r->len == TEST_HEAP_SIZE/4);
+ assert(strcmp(r->name, "base") == 0);
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ } else
+ abort();
+ assert(mem_check(r));
+ i++;
+ }
+ assert(i == 3);
+ while ((r = list_pop(&regions, struct mem_region, list)) != NULL) {
+ list_del(&r->list);
+ mem_free(&skiboot_heap, r, __location__);
+ }
+ assert(mem_region_lock.lock_val == 0);
+ __free(test_heap, "");
+ return 0;
+}
diff --git a/core/test/run-mem_region_init.c b/core/test/run-mem_region_init.c
new file mode 100644
index 0000000..a24cc7b
--- /dev/null
+++ b/core/test/run-mem_region_init.c
@@ -0,0 +1,179 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+/* Use these before we undefine them below. */
+static inline void *real_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void real_free(void *p)
+{
+ free(p);
+}
+
+#include "../malloc.c"
+
+#include <skiboot.h>
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+static inline char *skiboot_strdup(const char *str)
+{
+ char *ret = __malloc(strlen(str) + 1, "");
+ return memcpy(ret, str, strlen(str) + 1);
+}
+#undef strdup
+#define strdup skiboot_strdup
+
+#include "../device.c"
+
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+/* We actually need a lot of room for the bitmaps! */
+#define TEST_HEAP_ORDER 27
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (unsigned long long)start);
+
+ mem = dt_new(dt_root, name);
+ assert(mem);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t end;
+ int builtins;
+ struct mem_region *r;
+ char *heap = real_malloc(TEST_HEAP_SIZE);
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = (unsigned long)heap;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ /* Make sure we overlap the heap, at least. */
+ add_mem_node(0, 0x100000000ULL);
+ add_mem_node(0x100000000ULL, 0x100000000ULL);
+ end = 0x200000000ULL;
+
+ /* Now convert. */
+ mem_region_init();
+ assert(mem_check(&skiboot_heap));
+
+ builtins = 0;
+ list_for_each(&regions, r, list) {
+ /* Regions must not overlap. */
+ struct mem_region *r2, *pre = NULL, *post = NULL;
+ list_for_each(&regions, r2, list) {
+ if (r == r2)
+ continue;
+ assert(!overlaps(r, r2));
+ }
+
+ /* But should have exact neighbours. */
+ list_for_each(&regions, r2, list) {
+ if (r == r2)
+ continue;
+ if (r2->start == r->start + r->len)
+ post = r2;
+ if (r2->start + r2->len == r->start)
+ pre = r2;
+ }
+ assert(r->start == 0 || pre);
+ assert(r->start + r->len == end || post);
+
+ if (r == &skiboot_code_and_text ||
+ r == &skiboot_heap ||
+ r == &skiboot_after_heap ||
+ r == &skiboot_cpu_stacks ||
+ r == &skiboot_os_reserve)
+ builtins++;
+ else
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ assert(mem_check(r));
+ }
+ assert(builtins == 5);
+
+ dt_free(dt_root);
+
+ while ((r = list_pop(&regions, struct mem_region, list)) != NULL) {
+ list_del(&r->list);
+ if (r != &skiboot_code_and_text &&
+ r != &skiboot_heap &&
+ r != &skiboot_after_heap &&
+ r != &skiboot_os_reserve &&
+ r != &skiboot_cpu_stacks) {
+ free(r);
+ }
+ assert(mem_check(&skiboot_heap));
+ }
+ assert(mem_region_lock.lock_val == 0);
+ real_free(heap);
+ return 0;
+}
diff --git a/core/test/run-mem_region_release_unused.c b/core/test/run-mem_region_release_unused.c
new file mode 100644
index 0000000..e73cf25
--- /dev/null
+++ b/core/test/run-mem_region_release_unused.c
@@ -0,0 +1,177 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ return calloc(size, 1);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+#include "../device.c"
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (unsigned long long)start);
+
+ mem = dt_new(dt_root, name);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t i;
+ struct mem_region *r, *other = NULL;
+ void *other_mem;
+ const char *last;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)malloc(TEST_HEAP_SIZE);
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = skiboot_heap.start;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ other_mem = malloc(1024*1024);
+ add_mem_node((unsigned long)other_mem, 1024*1024);
+
+ /* Now convert. */
+ mem_region_init();
+
+ /* Find our node to allocate from */
+ list_for_each(&regions, r, list) {
+ if (region_start(r) == other_mem)
+ other = r;
+ }
+ /* This would only fail if skiboot's addresses clashed with our alloc. */
+ assert(other);
+ assert(mem_check(other));
+
+ /* Allocate 1k from other region. */
+ mem_alloc(other, 1024, 1, "1k");
+ mem_region_release_unused();
+
+ assert(mem_check(&skiboot_heap));
+
+ /* Now we expect it to be split. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ assert(mem_check(r));
+ i++;
+ if (r == &skiboot_os_reserve)
+ continue;
+ if (r == &skiboot_code_and_text)
+ continue;
+ if (r == &skiboot_heap)
+ continue;
+ if (r == &skiboot_after_heap)
+ continue;
+ if (r == &skiboot_cpu_stacks)
+ continue;
+ if (r == other) {
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ assert(r->len < 1024 * 1024);
+ } else {
+ assert(r->type == REGION_OS);
+ assert(r->start == other->start + other->len);
+ assert(r->start + r->len == other->start + 1024*1024);
+ }
+ }
+ assert(i == 7);
+
+ last = NULL;
+ list_for_each(&regions, r, list) {
+ if (last != r->name &&
+ strncmp(r->name, NODE_REGION_PREFIX,
+ strlen(NODE_REGION_PREFIX)) == 0) {
+ /* It's safe to cast away const as this is
+ * only going to happen in test code */
+ free((void*)r->name);
+ break;
+ }
+ last = r->name;
+ }
+
+ dt_free(dt_root);
+ free((void *)(long)skiboot_heap.start);
+ free(other_mem);
+ return 0;
+}
diff --git a/core/test/run-mem_region_release_unused_noalloc.c b/core/test/run-mem_region_release_unused_noalloc.c
new file mode 100644
index 0000000..818e272
--- /dev/null
+++ b/core/test/run-mem_region_release_unused_noalloc.c
@@ -0,0 +1,159 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ return calloc(size, 1);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+#include "../device.c"
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (unsigned long long)start);
+
+ mem = dt_new(dt_root, name);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t i;
+ struct mem_region *r;
+ const char *last;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)malloc(TEST_HEAP_SIZE);
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = skiboot_heap.start;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ add_mem_node(0, 0x100000000ULL);
+ add_mem_node(0x100000000ULL, 0x100000000ULL);
+
+ mem_region_init();
+
+ mem_region_release_unused();
+
+ assert(mem_check(&skiboot_heap));
+
+ /* Now we expect it to be split. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ assert(mem_check(r));
+ i++;
+ if (r == &skiboot_os_reserve)
+ continue;
+ if (r == &skiboot_code_and_text)
+ continue;
+ if (r == &skiboot_heap)
+ continue;
+ if (r == &skiboot_after_heap)
+ continue;
+ if (r == &skiboot_cpu_stacks)
+ continue;
+
+ /* the memory nodes should all be available to the OS now */
+ assert(r->type == REGION_OS);
+ }
+ assert(i == 9);
+
+ last = NULL;
+ list_for_each(&regions, r, list) {
+ if (last != r->name &&
+ strncmp(r->name, NODE_REGION_PREFIX,
+ strlen(NODE_REGION_PREFIX)) == 0) {
+ /* It's safe to cast away the const as
+ * this never happens at runtime,
+ * only in test and only for valgrind
+ */
+ free((void*)r->name);
+ }
+ last = r->name;
+ }
+
+ dt_free(dt_root);
+ free((void *)(long)skiboot_heap.start);
+ return 0;
+}
diff --git a/core/test/run-msg.c b/core/test/run-msg.c
new file mode 100644
index 0000000..cd36408
--- /dev/null
+++ b/core/test/run-msg.c
@@ -0,0 +1,256 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <inttypes.h>
+#include <assert.h>
+#include <errno.h>
+
+static bool zalloc_should_fail = false;
+static void *zalloc(size_t size)
+{
+ if (zalloc_should_fail) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ return calloc(size, 1);
+}
+
+#include "../opal-msg.c"
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values)
+{
+ (void)evt_mask;
+ (void)evt_values;
+}
+
+static long magic = 8097883813087437089UL;
+static void callback(void *data)
+{
+ assert(*(uint64_t *)data == magic);
+}
+
+static size_t list_count(struct list_head *list)
+{
+ size_t count = 0;
+ struct opal_msg_entry *dummy;
+
+ list_for_each(list, dummy, link)
+ count++;
+ return count;
+}
+
+int main(void)
+{
+ struct opal_msg_entry* entry;
+ int free_size = OPAL_MAX_MSGS;
+ int nfree = free_size;
+ int npending = 0;
+ int r;
+ static struct opal_msg m;
+ uint64_t *m_ptr = (uint64_t *)&m;
+
+ opal_init_msg();
+
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Callback. */
+ r = opal_queue_msg(0, &magic, callback, (u64)0, (u64)1, (u64)2);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 1);
+ assert(m.params[2] == 2);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ /* No params. */
+ r = opal_queue_msg(0, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ /* > 8 params (ARRAY_SIZE(entry->msg.params)) */
+ r = opal_queue_msg(0, NULL, NULL, 0, 1, 2, 3, 4, 5, 6, 7, 0xBADDA7A);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 1);
+ assert(m.params[2] == 2);
+ assert(m.params[3] == 3);
+ assert(m.params[4] == 4);
+ assert(m.params[5] == 5);
+ assert(m.params[6] == 6);
+ assert(m.params[7] == 7);
+
+ /* Exactly 8 params (ARRAY_SIZE(entry->msg.params)) */
+ r = opal_queue_msg(0, NULL, NULL, 0, 10, 20, 30, 40, 50, 60, 70);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 10);
+ assert(m.params[2] == 20);
+ assert(m.params[3] == 30);
+ assert(m.params[4] == 40);
+ assert(m.params[5] == 50);
+ assert(m.params[6] == 60);
+ assert(m.params[7] == 70);
+
+ /* Full list (no free nodes in pending). */
+ while (nfree > 0) {
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+ }
+ assert(list_count(&msg_free_list) == 0);
+ assert(nfree == 0);
+ assert(npending == OPAL_MAX_MSGS);
+
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == OPAL_MAX_MSGS+1);
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Make zalloc fail to test error handling. */
+ zalloc_should_fail = true;
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == OPAL_RESOURCE);
+
+ assert(list_count(&msg_pending_list) == OPAL_MAX_MSGS+1);
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Empty list (no nodes). */
+ while(!list_empty(&msg_pending_list)) {
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+ npending--;
+ nfree++;
+ }
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+ assert(npending == 0);
+ assert(nfree == OPAL_MAX_MSGS+1);
+
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ /* Request invalid size. */
+ r = opal_get_msg(m_ptr, sizeof(m) - 1);
+ assert(r == OPAL_PARAMETER);
+
+ /* Pass null buffer. */
+ r = opal_get_msg(NULL, sizeof(m));
+ assert(r == OPAL_PARAMETER);
+
+ /* Get msg when none are pending. */
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == OPAL_RESOURCE);
+
+#define test_queue_num(type, val) \
+ r = opal_queue_msg(0, NULL, NULL, \
+ (type)val, (type)val, (type)val, (type)val, \
+ (type)val, (type)val, (type)val, (type)val); \
+ assert(r == 0); \
+ r = opal_get_msg(m_ptr, sizeof(m)); \
+ assert(r == OPAL_SUCCESS); \
+ assert(m.params[0] == (type)val); \
+ assert(m.params[1] == (type)val); \
+ assert(m.params[2] == (type)val); \
+ assert(m.params[3] == (type)val); \
+ assert(m.params[4] == (type)val); \
+ assert(m.params[5] == (type)val); \
+ assert(m.params[6] == (type)val); \
+ assert(m.params[7] == (type)val)
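+
+/*
+ * Each test_queue_num() call pushes eight copies of (type)val through the
+ * u64 message params and reads them back; comparing against (type)val
+ * again checks that truncation and sign-/zero-extension through the
+ * varargs interface behave as expected for each width.
+ */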
+
+ /* Test types of various widths */
+ test_queue_num(u64, -1);
+ test_queue_num(s64, -1);
+ test_queue_num(u32, -1);
+ test_queue_num(s32, -1);
+ test_queue_num(u16, -1);
+ test_queue_num(s16, -1);
+ test_queue_num(u8, -1);
+ test_queue_num(s8, -1);
+
+ /* Clean up the list to keep valgrind happy. */
+ while(!list_empty(&msg_free_list)) {
+ entry = list_pop(&msg_free_list, struct opal_msg_entry, link);
+ assert(entry);
+ free(entry);
+ }
+
+ while(!list_empty(&msg_pending_list)) {
+ entry = list_pop(&msg_pending_list, struct opal_msg_entry, link);
+ assert(entry);
+ free(entry);
+ }
+
+ return 0;
+}
diff --git a/core/test/run-trace.c b/core/test/run-trace.c
new file mode 100644
index 0000000..7dabebd
--- /dev/null
+++ b/core/test/run-trace.c
@@ -0,0 +1,386 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sched.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* Don't include these: PPC-specific */
+#define __CPU_H
+#define __TIME_H
+#define __PROCESSOR_H
+
+#if defined(__i386__) || defined(__x86_64__)
+/* This is more than a lwsync, but it'll work */
+static void full_barrier(void)
+{
+ asm volatile("mfence" : : : "memory");
+}
+#define lwsync full_barrier
+#define sync full_barrier
+#else
+#error "Define sync & lwsync for this arch"
+#endif
+
+#define zalloc(size) calloc((size), 1)
+
+struct cpu_thread {
+ uint32_t pir;
+ uint32_t chip_id;
+ struct trace_info *trace;
+ int server_no;
+ bool is_secondary;
+ struct cpu_thread *primary;
+};
+static struct cpu_thread *this_cpu(void);
+
+#define CPUS 4
+
+static struct cpu_thread fake_cpus[CPUS];
+
+static inline struct cpu_thread *next_cpu(struct cpu_thread *cpu)
+{
+ if (cpu == NULL)
+ return &fake_cpus[0];
+ cpu++;
+ if (cpu == &fake_cpus[CPUS])
+ return NULL;
+ return cpu;
+}
+
+#define first_cpu() next_cpu(NULL)
+
+#define for_each_cpu(cpu) \
+ for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu))
+
+static unsigned long timestamp;
+static unsigned long mftb(void)
+{
+ return timestamp;
+}
+
+static void *local_alloc(unsigned int chip_id,
+ size_t size, size_t align)
+{
+ void *p;
+
+ (void)chip_id;
+ if (posix_memalign(&p, align, size))
+ p = NULL;
+ return p;
+}
+
+struct dt_node;
+extern struct dt_node *opal_node;
+
+#include "../trace.c"
+
+#define rmb() lwsync()
+
+#include "../external/trace.c"
+#include "../device.c"
+
+char __rodata_start[1], __rodata_end[1];
+struct dt_node *opal_node;
+struct debug_descriptor debug_descriptor = {
+ .trace_mask = -1
+};
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+struct cpu_thread *my_fake_cpu;
+static struct cpu_thread *this_cpu(void)
+{
+ return my_fake_cpu;
+}
+
+#include <sys/mman.h>
+#define PER_CHILD_TRACES (1024*1024)
+
+static void write_trace_entries(int id)
+{
+ unsigned int i;
+ union trace trace;
+
+ timestamp = id;
+ for (i = 0; i < PER_CHILD_TRACES; i++) {
+ timestamp = i * CPUS + id;
+ assert(sizeof(trace.hdr) % 8 == 0);
+ /* First child never repeats, second repeats once, etc. */
+ trace_add(&trace, 3 + ((i / (id + 1)) % 0x40),
+ sizeof(trace.hdr));
+ }
+
+ /* Final entry has special type, so parent knows it's over. */
+ trace_add(&trace, 0x70, sizeof(trace.hdr));
+ exit(0);
+}
+
+static bool all_done(const bool done[])
+{
+ unsigned int i;
+
+ for (i = 0; i < CPUS; i++)
+ if (!done[i])
+ return false;
+ return true;
+}
+
+static void test_parallel(void)
+{
+ void *p;
+ unsigned int i, counts[CPUS] = { 0 }, overflows[CPUS] = { 0 };
+ unsigned int repeats[CPUS] = { 0 }, num_overflows[CPUS] = { 0 };
+ bool done[CPUS] = { false };
+ size_t len = sizeof(struct trace_info) + TBUF_SZ + sizeof(union trace);
+ int last = 0;
+
+ /* Use a shared mmap to test actual parallel buffers. */
+ i = (CPUS*len + getpagesize()-1) & ~(getpagesize()-1);
+ p = mmap(NULL, i, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_SHARED, -1, 0);
+ assert(p != MAP_FAILED);
+
+ for (i = 0; i < CPUS; i++) {
+ fake_cpus[i].trace = p + i * len;
+ fake_cpus[i].trace->tb.mask = TBUF_SZ - 1;
+ fake_cpus[i].trace->tb.max_size = sizeof(union trace);
+ fake_cpus[i].is_secondary = false;
+ }
+
+ for (i = 0; i < CPUS; i++) {
+ if (!fork()) {
+ /* Child. */
+ my_fake_cpu = &fake_cpus[i];
+ write_trace_entries(i);
+ }
+ }
+
+ while (!all_done(done)) {
+ union trace t;
+
+ for (i = 0; i < CPUS; i++) {
+ if (trace_get(&t, &fake_cpus[(i+last) % CPUS].trace->tb))
+ break;
+ }
+
+ if (i == CPUS) {
+ sched_yield();
+ continue;
+ }
+ i = (i + last) % CPUS;
+ last = i;
+
+ assert(t.hdr.cpu < CPUS);
+ assert(!done[t.hdr.cpu]);
+
+ if (t.hdr.type == TRACE_OVERFLOW) {
+ /* Conveniently, each record is 16 bytes here. */
+ assert(t.overflow.bytes_missed % 16 == 0);
+ overflows[i] += t.overflow.bytes_missed / 16;
+ num_overflows[i]++;
+ continue;
+ }
+
+ assert(t.hdr.timestamp % CPUS == t.hdr.cpu);
+ if (t.hdr.type == TRACE_REPEAT) {
+ assert(t.hdr.len_div_8 * 8 == sizeof(t.repeat));
+ assert(t.repeat.num != 0);
+ assert(t.repeat.num <= t.hdr.cpu);
+ repeats[t.hdr.cpu] += t.repeat.num;
+ } else if (t.hdr.type == 0x70) {
+ done[t.hdr.cpu] = true;
+ } else {
+ counts[t.hdr.cpu]++;
+ }
+ }
+
+ /* Gather children. */
+ for (i = 0; i < CPUS; i++) {
+ int status;
+ wait(&status);
+ }
+
+ for (i = 0; i < CPUS; i++) {
+ printf("Child %i: %u produced, %u overflows, %llu total\n", i,
+ counts[i], overflows[i],
+ (long long)fake_cpus[i].trace->tb.end);
+ assert(counts[i] + repeats[i] <= PER_CHILD_TRACES);
+ }
+ /* Child 0 never repeats. */
+ assert(repeats[0] == 0);
+ assert(counts[0] + overflows[0] == PER_CHILD_TRACES);
+
+ /*
+ * FIXME: Other children have some fuzz, since overflows may
+ * include repeat record we already read. And odd-numbered
+ * overflows may include more repeat records than normal
+ * records (they alternate).
+ */
+}
+
+int main(void)
+{
+ union trace minimal;
+ union trace large;
+ union trace trace;
+ unsigned int i, j;
+
+ opal_node = dt_new_root("opal");
+ for (i = 0; i < CPUS; i++) {
+ fake_cpus[i].server_no = i;
+ fake_cpus[i].is_secondary = (i & 0x1);
+ fake_cpus[i].primary = &fake_cpus[i & ~0x1];
+ }
+ init_trace_buffers();
+ my_fake_cpu = &fake_cpus[0];
+
+ for (i = 0; i < CPUS; i++) {
+ assert(trace_empty(&fake_cpus[i].trace->tb));
+ assert(!trace_get(&trace, &fake_cpus[i].trace->tb));
+ }
+
+ assert(sizeof(trace.hdr) % 8 == 0);
+ timestamp = 1;
+ trace_add(&minimal, 100, sizeof(trace.hdr));
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.timestamp == timestamp);
+
+ /* Make it wrap once. */
+ for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8) + 1; i++) {
+ timestamp = i;
+ trace_add(&minimal, 99 + (i%2), sizeof(trace.hdr));
+ }
+
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ /* First one must be overflow marker. */
+ assert(trace.hdr.type == TRACE_OVERFLOW);
+ assert(trace.hdr.len_div_8 * 8 == sizeof(trace.overflow));
+ assert(trace.overflow.bytes_missed == minimal.hdr.len_div_8 * 8);
+
+ for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8); i++) {
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.timestamp == i+1);
+ assert(trace.hdr.type == 99 + ((i+1)%2));
+ }
+ assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
+
+ /* Now put in some weird-length ones, to test overlap.
+ * Use the largest power of 2 below sizeof(large). */
+ for (j = 0; (1 << j) < sizeof(large); j++);
+ for (i = 0; i < TBUF_SZ; i++) {
+ timestamp = i;
+ trace_add(&large, 100 + (i%2), (1 << (j-1)));
+ }
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.type == TRACE_OVERFLOW);
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.len_div_8 == large.hdr.len_div_8);
+ i = trace.hdr.timestamp;
+ while (trace_get(&trace, &my_fake_cpu->trace->tb))
+ assert(trace.hdr.timestamp == ++i);
+
+ /* Test repeats. */
+ for (i = 0; i < 65538; i++) {
+ timestamp = i;
+ trace_add(&minimal, 100, sizeof(trace.hdr));
+ }
+ timestamp = i;
+ trace_add(&minimal, 101, sizeof(trace.hdr));
+ timestamp = i+1;
+ trace_add(&minimal, 101, sizeof(trace.hdr));
+
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.timestamp == 0);
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.type == 100);
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.type == TRACE_REPEAT);
+ assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
+ assert(trace.repeat.num == 65535);
+ assert(trace.repeat.timestamp == 65535);
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.timestamp == 65536);
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.type == 100);
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.type == TRACE_REPEAT);
+ assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
+ assert(trace.repeat.num == 1);
+ assert(trace.repeat.timestamp == 65537);
+
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.timestamp == 65538);
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.type == 101);
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.type == TRACE_REPEAT);
+ assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
+ assert(trace.repeat.num == 1);
+ assert(trace.repeat.timestamp == 65539);
+
+ /* Now, test adding repeat while we're reading... */
+ timestamp = 0;
+ trace_add(&minimal, 100, sizeof(trace.hdr));
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ assert(trace.hdr.timestamp == 0);
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ assert(trace.hdr.type == 100);
+
+ for (i = 1; i < TBUF_SZ; i++) {
+ timestamp = i;
+ trace_add(&minimal, 100, sizeof(trace.hdr));
+ assert(trace_get(&trace, &my_fake_cpu->trace->tb));
+ if (i % 65536 == 0) {
+ assert(trace.hdr.type == 100);
+ assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
+ } else {
+ assert(trace.hdr.type == TRACE_REPEAT);
+ assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
+ assert(trace.repeat.num == 1);
+ }
+ assert(trace.repeat.timestamp == i);
+ assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
+ }
+
+ for (i = 0; i < CPUS; i++)
+ if (!fake_cpus[i].is_secondary)
+ free(fake_cpus[i].trace);
+
+ test_parallel();
+
+ return 0;
+}
diff --git a/core/test/stubs.c b/core/test/stubs.c
new file mode 100644
index 0000000..3233455
--- /dev/null
+++ b/core/test/stubs.c
@@ -0,0 +1,43 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Add any stub functions required for linking here. */
+#include <stdlib.h>
+
+static void stub_function(void)
+{
+ abort();
+}
+
+#define STUB(fnname) \
+ void fnname(void) __attribute__((weak, alias ("stub_function")))
+
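+/*
+ * For example, STUB(fdt_begin_node) expands to:
+ * void fdt_begin_node(void) __attribute__((weak, alias ("stub_function")));
+ * Any stubbed symbol that is actually called aborts at runtime, while a
+ * real definition elsewhere overrides the weak alias at link time.
+ */
+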
+STUB(fdt_begin_node);
+STUB(fdt_property);
+STUB(fdt_end_node);
+STUB(fdt_create);
+STUB(fdt_add_reservemap_entry);
+STUB(fdt_finish_reservemap);
+STUB(fdt_strerror);
+STUB(fdt_check_header);
+STUB(_fdt_check_node_offset);
+STUB(fdt_next_tag);
+STUB(fdt_string);
+STUB(fdt_get_name);
+STUB(dt_first);
+STUB(dt_next);
+STUB(dt_has_node_property);
+STUB(dt_get_address);
+STUB(add_chip_dev_associativity);
diff --git a/core/timebase.c b/core/timebase.c
new file mode 100644
index 0000000..d51e96b
--- /dev/null
+++ b/core/timebase.c
@@ -0,0 +1,67 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <timebase.h>
+#include <fsp.h>
+
+void time_wait(unsigned long duration)
+{
+ unsigned long end = mftb() + duration;
+
+ while(tb_compare(mftb(), end) != TB_AAFTERB)
+ fsp_poll();
+}
+
+void time_wait_ms(unsigned long ms)
+{
+ time_wait(msecs_to_tb(ms));
+}
+
+void time_wait_us(unsigned long us)
+{
+ time_wait(usecs_to_tb(us));
+}
+
+unsigned long timespec_to_tb(const struct timespec *ts)
+{
+ unsigned long ns;
+
+ /* First convert to ns */
+ ns = ts->tv_sec * 1000000000ul;
+ ns += ts->tv_nsec;
+
+ /*
+ * This is a very rough approximation, it works provided
+ * we never try to pass too long delays here and the TB
+ * frequency isn't significantly lower than 512MHz.
+ *
+ * We could improve the precision by shifting less bits
+ * at the expense of capacity or do 128 bit math which
+ * I'm not eager to do :-)
+ */
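+ /*
+ * Worked example, assuming tb_hz = 512000000: the ratio becomes
+ * (512000000 >> 24) / (1000000000 >> 24) = 30 / 59, i.e. about
+ * 508.5M ticks per second instead of 512M, a ~0.7% error.
+ */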
+ return (ns * (tb_hz >> 24)) / (1000000000ul >> 24);
+}
+
+int nanosleep(const struct timespec *req, struct timespec *rem)
+{
+ time_wait(timespec_to_tb(req));
+
+ if (rem) {
+ rem->tv_sec = 0;
+ rem->tv_nsec = 0;
+ }
+ return 0;
+}
diff --git a/core/trace.c b/core/trace.c
new file mode 100644
index 0000000..76f3c30
--- /dev/null
+++ b/core/trace.c
@@ -0,0 +1,244 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <trace.h>
+#include <timebase.h>
+#include <lock.h>
+#include <string.h>
+#include <stdlib.h>
+#include <cpu.h>
+#include <device.h>
+#include <libfdt.h>
+#include <processor.h>
+#include <skiboot.h>
+
+#define DEBUG_TRACES
+
+#define MAX_SIZE (sizeof(union trace) + 7)
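+/* The +7 allows for trace_add() rounding entry lengths up to a multiple of 8. */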
+
+/* Smaller trace buffer for early booting */
+#define BOOT_TBUF_SZ 65536
+static struct {
+ struct trace_info trace_info;
+ char buf[BOOT_TBUF_SZ + MAX_SIZE];
+} boot_tracebuf __section(".data.boot_trace") = {
+ .trace_info = {
+ .lock = LOCK_UNLOCKED,
+ .tb = {
+ .mask = BOOT_TBUF_SZ - 1,
+ .max_size = MAX_SIZE
+ },
+ },
+ .buf = { 0 }
+};
+
+void init_boot_tracebuf(struct cpu_thread *boot_cpu)
+{
+ boot_cpu->trace = &boot_tracebuf.trace_info;
+}
+
+static size_t tracebuf_extra(void)
+{
+ /* We make room for the largest possible record */
+ return TBUF_SZ + MAX_SIZE;
+}
+
+/* To avoid bloating each entry, repeats are actually specific entries.
+ * tb->last points to the last (non-repeat) entry. */
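+/*
+ * e.g. adding the same entry A three times stores A followed by a single
+ * TRACE_REPEAT record whose num field ends up at 2, rather than three
+ * full copies of A.
+ */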
+static bool handle_repeat(struct tracebuf *tb, const union trace *trace)
+{
+ struct trace_hdr *prev;
+ struct trace_repeat *rpt;
+ u32 len;
+
+ prev = (void *)tb->buf + (tb->last & tb->mask);
+
+ if (prev->type != trace->hdr.type
+ || prev->len_div_8 != trace->hdr.len_div_8
+ || prev->cpu != trace->hdr.cpu)
+ return false;
+
+ len = prev->len_div_8 << 3;
+ if (memcmp(prev + 1, &trace->hdr + 1, len - sizeof(*prev)) != 0)
+ return false;
+
+ /* If they've consumed prev entry, don't repeat. */
+ if (tb->last < tb->start)
+ return false;
+
+ /* OK, it's a duplicate. Do we already have repeat? */
+ if (tb->last + len != tb->end) {
+ /* FIXME: Reader is not protected from seeing this! */
+ rpt = (void *)tb->buf + ((tb->last + len) & tb->mask);
+ assert(tb->last + len + rpt->len_div_8*8 == tb->end);
+ assert(rpt->type == TRACE_REPEAT);
+
+ /* If this repeat entry is full, don't repeat. */
+ if (rpt->num == 0xFFFF)
+ return false;
+
+ rpt->num++;
+ rpt->timestamp = trace->hdr.timestamp;
+ return true;
+ }
+
+ /*
+ * Generate repeat entry: it's the smallest possible entry, so we
+ * must have eliminated old entries.
+ */
+ assert(trace->hdr.len_div_8 * 8 >= sizeof(*rpt));
+
+ rpt = (void *)tb->buf + (tb->end & tb->mask);
+ rpt->timestamp = trace->hdr.timestamp;
+ rpt->type = TRACE_REPEAT;
+ rpt->len_div_8 = sizeof(*rpt) >> 3;
+ rpt->cpu = trace->hdr.cpu;
+ rpt->prev_len = trace->hdr.len_div_8 << 3;
+ rpt->num = 1;
+ lwsync(); /* write barrier: complete repeat record before exposing */
+ tb->end += sizeof(*rpt);
+ return true;
+}
+
+void trace_add(union trace *trace, u8 type, u16 len)
+{
+ struct trace_info *ti = this_cpu()->trace;
+ unsigned int tsz;
+
+ trace->hdr.type = type;
+ trace->hdr.len_div_8 = (len + 7) >> 3;
+
+ tsz = trace->hdr.len_div_8 << 3;
+
+#ifdef DEBUG_TRACES
+ assert(tsz >= sizeof(trace->hdr));
+ assert(tsz <= sizeof(*trace));
+ assert(trace->hdr.type != TRACE_REPEAT);
+ assert(trace->hdr.type != TRACE_OVERFLOW);
+#endif
+ /* Skip traces not enabled in the debug descriptor */
+ if (!((1ul << trace->hdr.type) & debug_descriptor.trace_mask))
+ return;
+
+ trace->hdr.timestamp = mftb();
+ trace->hdr.cpu = this_cpu()->server_no;
+
+ lock(&ti->lock);
+
+ /* Throw away old entries before we overwrite them. */
+ while ((ti->tb.start + ti->tb.mask + 1) < (ti->tb.end + tsz)) {
+ struct trace_hdr *hdr;
+
+ hdr = (void *)ti->tb.buf + (ti->tb.start & ti->tb.mask);
+ ti->tb.start += hdr->len_div_8 << 3;
+ }
+
+ /* Must update ->start before we rewrite new entries. */
+ lwsync(); /* write barrier */
+
+ /* Check for duplicates... */
+ if (!handle_repeat(&ti->tb, trace)) {
+ /* This may go off end, and that's why ti->tb.buf is oversize */
+ memcpy(ti->tb.buf + (ti->tb.end & ti->tb.mask), trace, tsz);
+ ti->tb.last = ti->tb.end;
+ lwsync(); /* write barrier: write entry before exposing */
+ ti->tb.end += tsz;
+ }
+ unlock(&ti->lock);
+}
+
+static void trace_add_dt_props(void)
+{
+ unsigned int i;
+ u64 *prop, tmask;
+
+ prop = malloc(sizeof(u64) * 2 * debug_descriptor.num_traces);
+
+ for (i = 0; i < debug_descriptor.num_traces; i++) {
+ prop[i * 2] = cpu_to_fdt64(debug_descriptor.trace_phys[i]);
+ prop[i * 2 + 1] = cpu_to_fdt64(debug_descriptor.trace_size[i]);
+ }
+
+ dt_add_property(opal_node, "ibm,opal-traces",
+ prop, sizeof(u64) * 2 * i);
+ free(prop);
+
+ tmask = (uint64_t)&debug_descriptor.trace_mask;
+ dt_add_property_cells(opal_node, "ibm,opal-trace-mask",
+ hi32(tmask), lo32(tmask));
+}
+
+static void trace_add_desc(struct trace_info *t, uint64_t size)
+{
+ unsigned int i = debug_descriptor.num_traces;
+
+ if (i >= DEBUG_DESC_MAX_TRACES) {
+ prerror("TRACE: Debug descriptor trace list full !\n");
+ return;
+ }
+ debug_descriptor.num_traces++;
+
+ debug_descriptor.trace_phys[i] = (uint64_t)&t->tb;
+ debug_descriptor.trace_tce[i] = 0; /* populated later */
+ debug_descriptor.trace_size[i] = size;
+}
+
+/* Allocate trace buffers once we know memory topology */
+void init_trace_buffers(void)
+{
+ struct cpu_thread *t;
+ struct trace_info *any = &boot_tracebuf.trace_info;
+ uint64_t size;
+
+ /* Register the boot trace in the debug descriptor */
+ trace_add_desc(any, sizeof(boot_tracebuf.buf));
+
+ /* Allocate a trace buffer for each primary cpu. */
+ for_each_cpu(t) {
+ if (t->is_secondary)
+ continue;
+
+ /* Use a 4K alignment for TCE mapping */
+ size = ALIGN_UP(sizeof(*t->trace) + tracebuf_extra(), 0x1000);
+ t->trace = local_alloc(t->chip_id, size, 0x1000);
+ if (t->trace) {
+ any = t->trace;
+ memset(t->trace, 0, size);
+ init_lock(&t->trace->lock);
+ t->trace->tb.mask = TBUF_SZ - 1;
+ t->trace->tb.max_size = MAX_SIZE;
+ trace_add_desc(any, sizeof(t->trace->tb) +
+ tracebuf_extra());
+ } else
+ prerror("TRACE: cpu 0x%x allocation failed\n", t->pir);
+ }
+
+ /* In case any allocations failed, share trace buffers. */
+ for_each_cpu(t) {
+ if (!t->is_secondary && !t->trace)
+ t->trace = any;
+ }
+
+ /* And copy those to the secondaries. */
+ for_each_cpu(t) {
+ if (!t->is_secondary)
+ continue;
+ t->trace = t->primary->trace;
+ }
+
+ /* Trace node in DT. */
+ trace_add_dt_props();
+}
diff --git a/core/utils.c b/core/utils.c
new file mode 100644
index 0000000..2bc57b1
--- /dev/null
+++ b/core/utils.c
@@ -0,0 +1,59 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <lock.h>
+#include <fsp.h>
+#include <processor.h>
+
+void abort(void)
+{
+ static bool in_abort = false;
+ unsigned long hid0;
+
+ if (in_abort)
+ for (;;) ;
+ in_abort = true;
+
+ bust_locks = true;
+
+ op_display(OP_FATAL, OP_MOD_CORE, 0x6666);
+
+ fputs("Aborting!\n", stderr);
+ backtrace();
+
+ /* XXX FIXME: We should fsp_poll for a while to ensure any pending
+ * console writes have made it out, but until we have decent PSI
+ * link handling we must not do it forever. Polling can prevent the
+ * FSP from bringing the PSI link up and it can get stuck in a
+ * reboot loop.
+ */
+
+ hid0 = mfspr(SPR_HID0);
+ hid0 |= SPR_HID0_ENABLE_ATTN;
+ set_hid0(hid0);
+ trigger_attn();
+ for (;;) ;
+}
+
+char __attrconst tohex(uint8_t nibble)
+{
+ static const char __tohex[] = {'0','1','2','3','4','5','6','7','8','9',
+ 'A','B','C','D','E','F'};
+ if (nibble > 0xf)
+ return '?';
+ return __tohex[nibble];
+}
diff --git a/core/vpd.c b/core/vpd.c
new file mode 100644
index 0000000..deb552c
--- /dev/null
+++ b/core/vpd.c
@@ -0,0 +1,211 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <vpd.h>
+#include <string.h>
+#include <fsp.h>
+#include <device.h>
+
+#define CHECK_SPACE(_p, _n, _e) (((_e) - (_p)) >= (_n))
+
+/* Low level keyword search in a record. Can be used when we
+ * need to find the next keyword of a given type, for example
+ * when there are multiple MF/SM keyword pairs.
+ */
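+/*
+ * Keyword layout, as parsed below: two name bytes, a one-byte size, then
+ * the data. For example the bytes 'R' 'T' 0x04 'V' 'I' 'N' 'I' encode a
+ * 4-byte "RT" keyword containing "VINI".
+ */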
+const void *vpd_find_keyword(const void *rec, size_t rec_sz,
+ const char *kw, uint8_t *kw_size)
+{
+ const uint8_t *p = rec, *end = rec + rec_sz;
+
+ while (CHECK_SPACE(p, 3, end)) {
+ uint8_t k1 = *(p++);
+ uint8_t k2 = *(p++);
+ uint8_t sz = *(p++);
+
+ if (k1 == kw[0] && k2 == kw[1]) {
+ if (kw_size)
+ *kw_size = sz;
+ return p;
+ }
+ p += sz;
+ }
+ return NULL;
+}
+
+/* Locate a record in a VPD blob
+ *
+ * Note: This works with VPD LIDs. It will scan until it finds
+ * the first 0x84, so it will skip all those 0's that the VPD
+ * LIDs seem to contain
+ */
+const void *vpd_find_record(const void *vpd, size_t vpd_size,
+ const char *record, size_t *sz)
+{
+ const uint8_t *p = vpd, *end = vpd + vpd_size;
+ bool first_start = true;
+ size_t rec_sz;
+ uint8_t namesz = 0;
+ const char *rec_name;
+
+ while (CHECK_SPACE(p, 4, end)) {
+ /* Get header byte */
+ if (*(p++) != 0x84) {
+ /* Skip initial crap in VPD LIDs */
+ if (first_start)
+ continue;
+ break;
+ }
+ first_start = false;
+ rec_sz = *(p++);
+ rec_sz |= *(p++) << 8;
+ if (!CHECK_SPACE(p, rec_sz, end)) {
+ prerror("VPD: Malformed or truncated VPD,"
+ " record size doesn't fit\n");
+ return NULL;
+ }
+
+ /* Find record name */
+ rec_name = vpd_find_keyword(p, rec_sz, "RT", &namesz);
+ if (rec_name && strncmp(record, rec_name, namesz) == 0) {
+ *sz = rec_sz;
+ return p;
+ }
+
+ p += rec_sz;
+ if (*(p++) != 0x78) {
+ prerror("VPD: Malformed or truncated VPD,"
+ " missing final 0x78 in record %.4s\n",
+ rec_name ? rec_name : "????");
+ return NULL;
+ }
+ }
+ return NULL;
+}
+
+/* Locate a keyword in a record in a VPD blob
+ *
+ * Note: This works with VPD LIDs. It will scan until it finds
+ * the first 0x84, so it will skip all those 0's that the VPD
+ * LIDs seem to contain
+ */
+const void *vpd_find(const void *vpd, size_t vpd_size,
+ const char *record, const char *keyword,
+ uint8_t *sz)
+{
+ size_t rec_sz;
+ const uint8_t *p;
+
+ p = vpd_find_record(vpd, vpd_size, record, &rec_sz);
+ if (p)
+ p = vpd_find_keyword(p, rec_sz, keyword, sz);
+ return p;
+}
+
+/* Helper to load a VPD LID. Pass a ptr to the corresponding LX keyword */
+static void *vpd_lid_load(const uint8_t *lx, uint8_t lxrn, size_t *size)
+{
+ /* Now this is a guess game as we don't have the info from the
+ * pHyp folks. But basically, it seems to boil down to loading
+ * a LID whose name is 0x80e000yy where yy is the last 2 digits
+ * of the LX record in hex.
+ *
+ * [ Correction: After a chat with some folks, it looks like it's
+ * actually 4 digits, though the lid number is limited to fff
+ * so we weren't far off. ]
+ *
+ * For safety, we look for a matching LX record in an LXRn
+ * (n = lxrn argument) or in VINI if lxrn=0xff
+ */
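+ /*
+ * e.g. an LX record with lx[6] = 0x12 and lx[7] = 0x34 maps to
+ * LID 0x80e00234: only the low nibble of lx[6] is used, keeping
+ * the number within the fff limit mentioned above.
+ */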
+ uint32_t lid_no = 0x80e00000 | ((lx[6] & 0xf) << 8) | lx[7];
+
+ /* We don't quite know how to get to the LID directory so
+ * we don't know the size. Let's allocate 16K. All the VPD LIDs
+ * I've seen so far are much smaller.
+ */
+#define VPD_LID_MAX_SIZE 0x4000
+ void *data = malloc(VPD_LID_MAX_SIZE);
+ char record[4] = "LXR0";
+ const void *valid_lx;
+ uint8_t lx_size;
+ int rc;
+
+ if (!data) {
+ prerror("VPD: Failed to allocate memory for LID\n");
+ return NULL;
+ }
+
+ /* Adjust LID number for flash side */
+ lid_no = fsp_adjust_lid_side(lid_no);
+ printf("VPD: Trying to load VPD LID 0x%08x...\n", lid_no);
+
+ *size = VPD_LID_MAX_SIZE;
+
+ /* Load it from the FSP */
+ rc = fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid_no, 0, data, size);
+ if (rc) {
+ prerror("VPD: Error %d loading VPD LID\n", rc);
+ goto fail;
+ }
+
+ /* Validate it */
+ if (lxrn < 9)
+ record[3] = '0' + lxrn;
+ else
+ memcpy(record, "VINI", 4);
+
+ valid_lx = vpd_find(data, *size, record, "LX", &lx_size);
+ if (!valid_lx || lx_size != 8) {
+ prerror("VPD: Cannot find validation LX record\n");
+ goto fail;
+ }
+ if (memcmp(valid_lx, lx, 8) != 0) {
+ prerror("VPD: LX record mismatch !\n");
+ goto fail;
+ }
+
+ printf("VPD: Loaded %zu bytes\n", *size);
+
+ /* Got it ! */
+ return realloc(data, *size);
+ fail:
+ free(data);
+ return NULL;
+}
+
+void vpd_iohub_load(struct dt_node *hub_node)
+{
+ void *vpd;
+ size_t sz;
+ const uint32_t *p;
+ unsigned int lx_idx;
+ const char *lxr;
+
+ p = dt_prop_get_def(hub_node, "ibm,vpd-lx-info", NULL);
+ if (!p)
+ return;
+
+ lx_idx = p[0];
+ lxr = (const char *)&p[1];
+
+ vpd = vpd_lid_load(lxr, lx_idx, &sz);
+ if (!vpd) {
+ prerror("VPD: Failed to load VPD LID\n");
+ } else {
+ dt_add_property(hub_node, "ibm,io-vpd", vpd, sz);
+ free(vpd);
+ }
+}
diff --git a/doc/device-tree.txt b/doc/device-tree.txt
new file mode 100644
index 0000000..a0e4457
--- /dev/null
+++ b/doc/device-tree.txt
@@ -0,0 +1,516 @@
+/*
+ * Sapphire device-tree requirements
+ *
+ * Version: 0.2.1
+ *
+ * This documents the generated device-tree requirements; it is based on
+ * a commented dump of a device-tree generated by Sapphire itself from
+ * an HDAT.
+ */
+
+/*
+ * General comments:
+ *
+ * - skiboot does not require nodes to have phandle properties, but
+ * if you have them then *all* nodes must have them including the
+ * root of the device-tree (currently a HB bug !). It is recommended
+ * to have them since they are needed to represent the cache levels.
+ *
+ * NOTE: The example tree below only has phandle properties for
+ * nodes that are referenced by other nodes. This is *not* correct
+ * and is purely done for keeping this document smaller, make sure
+ * to follow the rule above.
+ *
+ * - Only the "phandle" property is required. Sapphire also generates
+ * a "linux,phandle" for backward compatibility but doesn't require
+ * it as an input.
+ *
+ * - Any property not specifically documented must be passed through "as is".
+ *
+ * - All ibm,chip-id properties contain a HW chip ID which corresponds
+ * on P8 to the PIR value shifted right by 7 bits, i.e. a 6-bit
+ * value made of a 3-bit node number and a 3-bit chip number.
+ *
+ * - Unit addresses (@xxxx part of node names) should if possible use
+ * lower case hexadecimal to be consistent with what skiboot does
+ * and to help some stupid parsers out there...
+ */
+
+/*
+ * Version history
+ *
+ * 2013/10/08 : Version 0.1
+ *
+ * 2013/10/09 : Version 0.2
+ *
+ * - Add comment about case of unit addresses
+ * - Add missing lpc node definition
+ * - Add example UART node on LPC
+ * - Remove "status" property from PSI xsco nodes
+ *
+ * 2014/03/26 : Version 0.2.1
+ *
+ * - Fix cpus/xxx/ibm,pa-features to be a byte array
+ */
+
+/dts-v1/;
+
+
+/*
+ * Here are the reserve map entries. They should exactly match the
+ * reserved-ranges property of the root node (see documentation
+ * of that property)
+ */
+
+/memreserve/ 0x00000007fe600000 0x0000000000100000;
+/memreserve/ 0x00000007fe200000 0x0000000000100000;
+/memreserve/ 0x0000000031e00000 0x00000000003e0000;
+/memreserve/ 0x0000000031000000 0x0000000000e00000;
+/memreserve/ 0x0000000030400000 0x0000000000c00000;
+/memreserve/ 0x0000000030000000 0x0000000000400000;
+/memreserve/ 0x0000000400000000 0x0000000000600450;
+
+/* Root node */
+/ {
+ /*
+ * "compatible" properties are string lists (ASCII strings separated by
+ * \0 characters) indicating the overall compatibility from the more
+ * specific to the least specific.
+ *
+ * The root node compatible property *must* contain "ibm,powernv" for
+ * Linux to match the machine to the powernv platform. It is recommended
+ * to add a more specific entry (first in the list) identifying the
+ * precise board type. We don't currently do that in HDAT based
+ * setups but will.
+ *
+ * The standard naming is "vendor,name" so in your case, something like
+ *
+ * compatible = "goog,rhesus","ibm,powernv";
+ *
+ * would work. Or even better:
+ *
+ * compatible = "goog,rhesus-v1","goog,rhesus","ibm,powernv";
+ */
+ compatible = "ibm,powernv";
+
+ /* mandatory */
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+
+ /* User visible board name (will be shown in /proc/cpuinfo) */
+ model = "Machine Name";
+
+ /*
+ * The reserved-names and reserved-ranges properties work hand in hand. The first one
+ * is a list of strings providing a "name" for each entry in the second one using
+ * the traditional "vendor,name" format.
+ *
+ * The reserved-ranges property contains a list of ranges, each in the form of 2 cells
+ * of address and 2 cells of size (64-bit x2 so each entry is 4 cells) indicating
+ * regions of memory that are reserved and must not be overwritten by skiboot or
+ * subsequently by the Linux Kernel.
+ *
+ * Corresponding entries must also be created in the "reserved map" part of the flat
+ * device-tree (which is a binary list in the header of the fdt).
+ *
+ * Unless a component (skiboot or Linux) specifically knows about a region (usually
+ * based on its name) and decides to change or remove it, all these regions are
+ * passed as-is to Linux and to subsequent kernels across kexec, and are
+ * preserved.
+ *
+ * NOTE: Do *NOT* copy the entries below, they are just an example and are actually
+ * created by skiboot itself. They represent the SLW image as "detected" by reading
+ * the PBA BARs and skiboot's own memory allocations.
+ *
+ * I would recommend that you put in there the SLW and OCC (or HOMER as one block
+ * if that's how you use it) and any additional memory you want to preserve such
+ * as FW log buffers etc...
+ */
+
+ reserved-names = "ibm,slw-image", "ibm,slw-image", "ibm,firmware-stacks", "ibm,firmware-data", "ibm,firmware-heap", "ibm,firmware-code", "memory@400000000";
+ reserved-ranges = <0x7 0xfe600000 0x0 0x100000 0x7 0xfe200000 0x0 0x100000 0x0 0x31e00000 0x0 0x3e0000 0x0 0x31000000 0x0 0xe00000 0x0 0x30400000 0x0 0xc00000 0x0 0x30000000 0x0 0x400000 0x4 0x0 0x0 0x600450>;
+
+ /* Mandatory */
+ cpus {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+
+ /*
+ * The following node must exist for each *core* in the system. The unit
+ * address (number after the @) is the hexadecimal HW CPU number (PIR value)
+ * of thread 0 of that core.
+ */
+ PowerPC,POWER8@20 {
+ /* mandatory/standard properties */
+ device_type = "cpu";
+ 64-bit;
+ 32-64-bridge;
+ graphics;
+ general-purpose;
+
+ /*
+ * The "status" property indicate whether the core is functional. It's
+ * a string containing "okay" for a good core or "bad" for a non-functional
+ * one. You can also just ommit the non-functional ones from the DT
+ */
+ status = "okay";
+
+ /*
+ * This is the same value as the PIR of thread 0 of that core
+ * (ie same as the @xx part of the node name)
+ */
+ reg = <0x20>;
+
+ /* same as above */
+ ibm,pir = <0x20>;
+
+ /* chip ID of this core */
+ ibm,chip-id = <0x0>;
+
+ /*
+ * interrupt server numbers (aka HW processor numbers) of all threads
+ * on that core. This should have 8 numbers and the first one should
+ * have the same value as the above ibm,pir and reg properties
+ */
+ ibm,ppc-interrupt-server#s = <0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27>;
+
+ /*
+ * This is the "architected processor version" as defined in PAPR. Just
+ * stick to 0x0f000004 for P8 and things will be fine
+ */
+ cpu-version = <0x0f000004>;
+
+ /*
+ * These are various definitions of the page sizes and segment sizes
+ * supported by the MMU, those values are fine for P8 for now
+ */
+ ibm,processor-segment-sizes = <0x1c 0x28 0xffffffff 0xffffffff>;
+ ibm,processor-page-sizes = <0xc 0x10 0x18 0x22>;
+ ibm,segment-page-sizes = <0xc 0x0 0x3 0xc 0x0 0x10 0x7 0x18 0x38 0x10 0x110 0x2 0x10 0x1 0x18 0x8 0x18 0x100 0x1 0x18 0x0 0x22 0x120 0x1 0x22 0x3>;
+
+ /*
+ * Similarly, this might need to be reviewed later but will do for now...
+ */
+ ibm,pa-features = [06 00 f6 3f c7 00 80 c0];
+
+ /* SLB size, use as-is */
+ ibm,slb-size = <0x20>;
+
+ /* VSX support, use as-is */
+ ibm,vmx = <0x2>;
+
+ /* DFP support, use as-is */
+ ibm,dfp = <0x2>;
+
+ /* PURR/SPURR support, use as-is */
+ ibm,purr = <0x1>;
+ ibm,spurr = <0x1>;
+
+ /*
+ * Old-style core clock frequency. Only create this property if the frequency fits
+ * in a 32-bit number. Do not create it if it doesn't
+ */
+ clock-frequency = <0xf5552d00>;
+
+ /*
+ * mandatory: 64-bit version of the core clock frequency, always create this
+ * property.
+ */
+ ibm,extended-clock-frequency = <0x0 0xf5552d00>;
+
+ /* Timebase freq has a fixed value, always use that */
+ timebase-frequency = <0x1e848000>;
+
+ /* Same */
+ ibm,extended-timebase-frequency = <0x0 0x1e848000>;
+
+ /* Use as-is, values might need to be adjusted but that will do for now */
+ reservation-granule-size = <0x80>;
+ d-tlb-size = <0x800>;
+ i-tlb-size = <0x0>;
+ tlb-size = <0x800>;
+ d-tlb-sets = <0x4>;
+ i-tlb-sets = <0x0>;
+ tlb-sets = <0x4>;
+ d-cache-block-size = <0x80>;
+ i-cache-block-size = <0x80>;
+ d-cache-size = <0x10000>;
+ i-cache-size = <0x8000>;
+ i-cache-sets = <0x4>;
+ d-cache-sets = <0x8>;
+ performance-monitor = <0x0 0x1>;
+
+ /*
+ * optional: phandle of the node representing the L2 cache for this core.
+ * Note: it can also be named "next-level-cache"; Linux supports both.
+ * Sapphire doesn't currently use those properties, it just passes them
+ * along to Linux
+ */
+ l2-cache = < 0x4 >;
+ };
+
+ /*
+ * Cache nodes. Those are siblings of the processor nodes under /cpus and
+ * represent the various level of caches.
+ *
+ * The unit address (and reg property) is mostly free-for-all as long as
+ * there are no collisions. On HDAT machines we use the following encoding,
+ * which I encourage you to also follow to limit surprises:
+ *
+ * L2 : (0x20 << 24) | PIR (PIR is PIR value of thread 0 of core)
+ * L3 : (0x30 << 24) | PIR
+ * L3.5 : (0x35 << 24) | PIR
+ *
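+ * For example, the L2 node for the core at PIR 0x20 below has unit
+ * address (0x20 << 24) | 0x20 = 0x20000020, i.e. l2-cache@20000020.
+ *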
+ * In addition, each cache points to the next level cache via its
+ * own "l2-cache" (or "next-level-cache") property, so the core node
+ * points to the L2, the L2 points to the L3 etc...
+ */
+
+ l2-cache@20000020 {
+ phandle = <0x4>;
+ device_type = "cache";
+ reg = <0x20000020>;
+ status = "okay";
+ cache-unified;
+ d-cache-sets = <0x8>;
+ i-cache-sets = <0x8>;
+ d-cache-size = <0x80000>;
+ i-cache-size = <0x80000>;
+ l2-cache = <0x5>;
+ };
+
+ l3-cache@30000020 {
+ phandle = <0x5>;
+ device_type = "cache";
+ reg = <0x30000020>;
+ status = "bad";
+ cache-unified;
+ d-cache-sets = <0x8>;
+ i-cache-sets = <0x8>;
+ d-cache-size = <0x800000>;
+ i-cache-size = <0x800000>;
+ };
+
+ };
+
+ /*
+ * Interrupt presentation controller (ICP) nodes
+ *
+ * There is some flexibility as to how many of these are present, since
+ * a given node can represent multiple ICPs. When generating from HDAT we
+ * chose to create one per core
+ */
+ interrupt-controller@3ffff80020000 {
+ /* Mandatory */
+ compatible = "IBM,ppc-xicp", "IBM,power8-icp";
+ interrupt-controller;
+ #address-cells = <0x0>;
+ #interrupt-cells = <0x1>;
+ device_type = "PowerPC-External-Interrupt-Presentation";
+
+ /*
+ * Range of HW CPU IDs represented by that node. In this example
+ * the core starting at PIR 0x20 with 8 threads, which corresponds
+ * to the CPU node of the example above. The property in theory
+ * supports multiple ranges but Linux doesn't.
+ */
+ ibm,interrupt-server-ranges = <0x20 0x8>;
+
+ /*
+ * For each server in the above range, the physical address of the
+ * ICP register block and its size. Since the root node #address-cells
+ * and #size-cells properties are both "2", each entry is thus
+ * 2 cells address and 2 cells size (64-bit each).
+ */
+ reg = <0x3ffff 0x80020000 0x0 0x1000 0x3ffff 0x80021000 0x0 0x1000 0x3ffff 0x80022000 0x0 0x1000 0x3ffff 0x80023000 0x0 0x1000 0x3ffff 0x80024000 0x0 0x1000 0x3ffff 0x80025000 0x0 0x1000 0x3ffff 0x80026000 0x0 0x1000 0x3ffff 0x80027000 0x0 0x1000>;
+ };
+
+ /*
+ * The "memory" nodes represent physical memory in the system. They
+ * do not represent DIMMs, memory controllers or Centaurs, thus will
+ * be expressed separately.
+ *
+ * In order to be able to handle affinity properly, we require that
+ * a memory node is created for each range of memory that has a different
+ * "affinity", which in practice means for each chip since we don't
+ * support memory interleaved across multiple chips on P8.
+ *
+ * Additionally, it is *not* required that one chip = one memory node;
+ * it is perfectly acceptable to break down the memory of one chip into
+ * multiple memory nodes (typically skiboot does that if the two MCs
+ * are not interleaved).
+ */
+ memory@0 {
+ device_type = "memory";
+
+ /*
+ * We support multiple entries in the ibm,chip-id property for
+ * memory nodes in case the memory is interleaved across multiple
+ * chips but that shouldn't happen on P8
+ */
+ ibm,chip-id = <0x0>;
+
+ /* The "reg" property is 4 cells, as usual for a child of
+ * the root node, 2 cells of address and 2 cells of size
+ */
+ reg = <0x0 0x0 0x4 0x0>;
+ };
+
+ /*
+ * The XSCOM node. This is the closest thing to a "chip" node we have.
+ * There must be one per chip in the system (thus a DCM has two) and
+ * while it represents the "parent" of various devices on the PIB/PCB
+ * that we want to expose, it is also used to store all sort of
+ * miscellaneous per-chip information on HDAT based systems (such
+ * as VPDs).
+ */
+ xscom@3fc0000000000 {
+ /* standard & mandatory */
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ scom-controller;
+ compatible = "ibm,xscom", "ibm,power8-xscom";
+
+ /* The chip ID as usual ... */
+ ibm,chip-id = <0x0>;
+
+ /* The base address of xscom for that chip */
+ reg = <0x3fc00 0x0 0x8 0x0>;
+
+ /*
+ * This comes from HDAT and I *think* is the raw content of the
+ * module VPD eeprom (and thus doesn't have a standard ASCII keyword
+ * VPD format). We don't currently use it though ...
+ */
+ ibm,module-vpd = < ... big pile of binary data ... >;
+
+ /* PSI host bridge XSCOM register set */
+ psihb@2010900 {
+ reg = <0x2010900 0x20>;
+ compatible = "ibm,power8-psihb-x", "ibm,psihb-x";
+ };
+
+ /* Chip TOD XSCOM register set */
+ chiptod@40000 {
+ reg = <0x40000 0x34>;
+ compatible = "ibm,power-chiptod", "ibm,power8-chiptod";
+
+ /*
+ * Create that property with no value if this chip has
+ * the Primary TOD in the topology. If it has the secondary
+ * one (backup master ?) use "secondary".
+ */
+ primary;
+ };
+
+ /* NX XSCOM register set */
+ nx@2010000 {
+ reg = <0x2010000 0x4000>;
+ compatible = "ibm,power-nx", "ibm,power8-nx";
+ };
+
+ /*
+ * PCI "PE Master" XSCOM register set for each active PHB
+ *
+ * For now, do *not* create these if the PHB isn't connected,
+ * clocked, or the PHY/HSS not configured.
+ */
+ pbcq@2012000 {
+ reg = <0x2012000 0x20 0x9012000 0x5 0x9013c00 0x15>;
+ compatible = "ibm,power8-pbcq";
+
+ /* Indicates the PHB index on the chip, ie, 0, 1 or 2 */
+ ibm,phb-index = <0x0>;
+
+ /* Create that property to use the IBM-style "A/B" dual input
+ * slot presence detect mechanism.
+ */
+ ibm,use-ab-detect;
+
+ /*
+ * TBD: Lane equalization values. Not currently used by
+ * skiboot but will have to be sorted out
+ */
+ ibm,lane_eq = <0x0>;
+ };
+
+ pbcq@2012400 {
+ reg = <0x2012400 0x20 0x9012400 0x5 0x9013c40 0x15>;
+ compatible = "ibm,power8-pbcq";
+ ibm,phb-index = <0x1>;
+ ibm,use-ab-detect;
+ ibm,lane_eq = <0x0>;
+ };
+
+ /*
+ * Here's the LPC bus. Ideally each chip has one but in
+ * practice it's ok to only populate the ones actually
+ * used for something. This is not an exact representation
+ * of the HW; in that case we would have eccb -> opb -> lpc,
+ * but instead we just have an lpc node and the address is
+ * the base of the ECCB register set for it
+ *
+ * Devices on the LPC are represented as children nodes,
+ * see example below for a standard UART.
+ */
+ lpc@b0020 {
+ /*
+ * Empty property indicating this is the primary
+ * LPC bus. It will be used for the default UART
+ * if any and this is the bus that will be used
+ * by Linux as the virtual 64k of IO ports
+ */
+ primary;
+
+ /*
+ * 2 cells of address, the first one indicates the
+ * address type, see below
+ */
+ #address-cells = <0x2>;
+ #size-cells = <0x1>;
+ reg = <0xb0020 0x4>;
+ compatible = "ibm,power8-lpc";
+
+ /*
+ * Example device: a UART on IO ports.
+ *
+ * LPC addresses have 2 cells. The first cell is the
+ * address type as follow:
+ *
+ * 0 : LPC memory space
+ * 1 : LPC IO space
+ * 2 : LPC FW space
+ *
+ * (This corresponds to the OPAL_LPC_* arguments
+ * passed to the opal_lpc_read/write functions)
+ *
+ * The unit address follows the old ISA convention
+ * for open firmware which prefixes IO ports with "i".
+ *
+ * (This is not critical and can be 1,3f8 if that's
+ * problematic to generate)
+ */
+ serial@i3f8 {
+ reg = <0x1 0x3f8 8>;
+ compatible = "ns16550", "pnpPNP,501";
+
+ /* Baud rate generator base frequency */
+ clock-frequency = < 1843200 >;
+
+ /* Default speed to use */
+ current-speed = < 115200 >;
+
+ /* Historical, helps Linux */
+ device_type = "serial";
+
+ /*
+ * Indicates which chip ID the interrupt
+ * is routed to (we assume it will always
+ * be the "host error interrupt" (aka
+ * "TPM interrupt") of that chip).
+ */
+ ibm,irq-chip-id = <0x0>;
+ };
+ };
+ };
+};
diff --git a/doc/error-logging.txt b/doc/error-logging.txt
new file mode 100644
index 0000000..a29d368
--- /dev/null
+++ b/doc/error-logging.txt
@@ -0,0 +1,384 @@
+How to log errors on Sapphire and POWERNV:
+=========================================
+
+Currently the errors reported by POWERNV/Sapphire (OPAL) interfaces
+are in free form, whereas errors reported by the FSP are in the standard
+Platform Error Log (PEL) format. For out-of-band management via IPMI
+interfaces, it is necessary to push the errors reported by
+POWERNV/Sapphire down to the FSP via the mailbox, in PEL format.
+
+PEL size can vary from 2K-16K bytes, the fields of which need to be
+populated based on the kind of event and error that needs to be reported.
+All the information that needs to be reported as part of the error is
+passed by the user via the error-logging interfaces outlined below.
+Following that, the PEL structure is generated based on the input and
+then passed on to the FSP.
+
+Error logging interfaces in Sapphire:
+====================================
+
+Interfaces are provided for the user to log/report an error in Sapphire.
+Using these interfaces, the relevant error information is collected and later
+converted to PEL format and then pushed to the FSP.
+
+Step 1: To report an error, invoke opal_elog_create() with the required argument.
+
+ struct opal_errorlog *opal_elog_create(int reason_code);
+
+ Each error/event that needs to be reported should do so with its
+ unique 32-bit reason code/SRC. Based on this SRC, the relevant
+ information around that error/event is gathered from the look-up
+ table and updated into the error log buffer.
+
+ Parameters:
+
+ int reason_code: Reason for failure as stated in include/fsp-elog.h
+ for Sapphire
+ Eg: Reason code for code-update failures can be
+ OPAL_RC_CU_INIT -> Initialisation failure
+ OPAL_RC_CU_FLASH -> Flash failure
+
+ The following info is gathered from the look-up table in fsp-elog_write.c
+ and is pre-defined for a given error.
+
+ uint8_t opal_error_event_type: Classification of the error/event
+ type reported on OPAL
+ /* Platform Events/Errors: Report Machine Check Interrupt */
+ #define OPAL_PLATFORM_ERR_EVT 0x01
+ /* INPUT_OUTPUT: Report all I/O related events/errors */
+ #define OPAL_INPUT_OUTPUT_ERR_EVT 0x02
+ /* RESOURCE_DEALLOC: Hotplug events and errors */
+ #define OPAL_RESOURCE_DEALLOC_ERR_EVT 0x03
+ /* MISC: Miscellaneous error */
+ #define OPAL_MISC_ERR_EVT 0x04
+
+ uint16_t component_id: Component ID of Sapphire component as
+ listed in include/fsp-elog.h
+
+ uint8_t subsystem_id: ID of the sub-system reporting error.
+ /* OPAL Subsystem IDs listed for reporting events/errors */
+ #define OPAL_PROCESSOR_SUBSYSTEM 0x10
+ #define OPAL_MEMORY_SUBSYSTEM 0x20
+ #define OPAL_IO_SUBSYSTEM 0x30
+ #define OPAL_IO_DEVICES 0x40
+ #define OPAL_CEC_HARDWARE 0x50
+ #define OPAL_POWER_COOLING 0x60
+ #define OPAL_MISC 0x70
+ #define OPAL_SURVEILLANCE_ERR 0x7A
+ #define OPAL_PLATFORM_FIRMWARE 0x80
+ #define OPAL_SOFTWARE 0x90
+ #define OPAL_EXTERNAL_ENV 0xA0
+
+ uint8_t event_severity: Severity of the event/error to be reported
+ #define OPAL_INFO 0x00
+ #define OPAL_RECOVERED_ERR_GENERAL 0x10
+
+ /* 0x2X series is to denote set of Predictive Error */
+ /* 0x20 Generic predictive error */
+ #define OPAL_PREDICTIVE_ERR_GENERAL 0x20
+ /* 0x21 Predictive error, degraded performance */
+ #define OPAL_PREDICTIVE_ERR_DEGRADED_PERF 0x21
+ /* 0x22 Predictive error, fault may be corrected after reboot */
+ #define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT 0x22
+ /*
+ * 0x23 Predictive error, fault may be corrected after reboot,
+ * degraded performance
+ */
+ #define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_BOOT_DEGRADE_PERF 0x23
+ /* 0x24 Predictive error, loss of redundancy */
+ #define OPAL_PREDICTIVE_ERR_LOSS_OF_REDUNDANCY 0x24
+
+ /* 0x4X series for Unrecoverable Error */
+ /* 0x40 Generic Unrecoverable error */
+ #define OPAL_UNRECOVERABLE_ERR_GENERAL 0x40
+ /* 0x41 Unrecoverable error bypassed with degraded performance */
+ #define OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF 0x41
+ /* 0x44 Unrecoverable error bypassed with loss of redundancy */
+ #define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY 0x44
+ /* 0x45 Unrecoverable error bypassed with loss of redundancy and performance */
+ #define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY_PERF 0x45
+ /* 0x48 Unrecoverable error bypassed with loss of function */
+ #define OPAL_UNRECOVERABLE_ERR_LOSS_OF_FUNCTION 0x48
+
+ #define OPAL_ERROR_PANIC 0x50
+
+ uint8_t event_subtype: Event Sub-type
+ #define OPAL_NA 0x00
+ #define OPAL_MISCELLANEOUS_INFO_ONLY 0x01
+ #define OPAL_PREV_REPORTED_ERR_RECTIFIED 0x10
+ #define OPAL_SYS_RESOURCES_DECONFIG_BY_USER 0x20
+ #define OPAL_SYS_RESOURCE_DECONFIG_PRIOR_ERR 0x21
+ #define OPAL_RESOURCE_DEALLOC_EVENT_NOTIFY 0x22
+ #define OPAL_CONCURRENT_MAINTENANCE_EVENT 0x40
+ #define OPAL_CAPACITY_UPGRADE_EVENT 0x60
+ #define OPAL_RESOURCE_SPARING_EVENT 0x70
+ #define OPAL_DYNAMIC_RECONFIG_EVENT 0x80
+ #define OPAL_NORMAL_SYS_PLATFORM_SHUTDOWN 0xD0
+ #define OPAL_ABNORMAL_POWER_OFF 0xE0
+
+ uint8_t opal_srctype: SRC type, value should be OPAL_SRC_TYPE_ERROR.
+ SRC refers to System Reference Code.
+ It is a 4-byte hexadecimal number that reflects the
+ current system state.
+ Eg: BB821010,
+ 1st byte -> BB -> SRC Type
+ 2nd byte -> 82 -> Subsystem
+ 3rd, 4th byte -> Component ID and Reason Code
+ The SRC needs to be generated on the fly depending on the state
+ of the system. All the parameters needed to generate an SRC
+ should be provided during reporting of an event/error.
+
+
+ uint32_t reason_code: Reason for failure as stated in include/fsp-elog.h
+ for Sapphire
+ Eg: Reason code for code-update failures can be
+ OPAL_RC_CU_INIT -> Initialisation failure
+ OPAL_RC_CU_FLASH -> Flash failure
+
+
+Step 2: Multiple extended user dumps can be appended to the error log
+ using the below interface.
+
+ int opal_elog_update_user_dump(struct opal_errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size)
+
+ Parameters:
+ struct opal_errorlog *buf: struct opal_errorlog pointer returned
+ by opal_elog_create() call.
+
+ unsigned char *data: Pointer to the dump data
+
+ uint32_t tag: Unique value to identify the data.
+ Ideally the ASCII value of a 4-byte string.
+
+ uint16_t size: Size of the dump data.
+
+Step 3: Once all the data for an error is logged in, the error needs to be
+ committed to the FSP.
+
+ rc = elog_fsp_commit(buf);
+ A value of 0 is returned on success.
+
+In the process of committing an error to the FSP, the log info is first
+internally converted to PEL format and then pushed to the FSP. All the errors
+logged in Sapphire are pushed back up to the POWERNV platform by the FSP, and
+all the errors reported by Sapphire and POWERNV are logged in the FSP.
+
+If the user does not intend to dump various user data sections, but just
+wants to log the error with some description around it, they can do so
+using the simple error logging interface:
+
+log_simple_error(uint32_t reason_code, char *fmt, ...);
+
+Eg: log_simple_error(OPAL_RC_SURVE_STATUS,
+ "SURV: Error retreiving surveillance status: %d\n",
+ err_len);
+
+Using the reason code, an error log is generated with the information derived
+from the look-up table, then populated and committed to the FSP. All of this
+is done with just one call.
+
+Note:
+====
+* For more information regarding error logging and PEL format
+ refer to PAPR doc and P7 PEL and SRC PLDD document.
+
+* Refer to include/opal.h for all the error logging
+ interface parameters and include/fsp-pel.h for PEL
+ structures.
+
+Sample error logging:
+===================
+void report_error(int index)
+{
+ struct opal_errorlog *buf;
+ int rc;
+ char data1[] = "This is a sample user defined data section1";
+ char data2[] = "Error logging sample. These are dummy errors. Section 2";
+ char data3[] = "Sample error Sample error Sample error Sample error \
+ Sample error abcdefghijklmnopqrstuvwxyz";
+ int tag;
+
+ printf("ELOG: In machine check report error index: %d\n", index);
+
+ /* To report an error, create an error log with relevant information
+ * struct opal_errorlog *opal_elog_create(int reason_code);
+ * Call returns a pre-allocated buffer of type 'struct opal_errorlog'
+ * buffer with relevant fields updated.
+ */
+
+ buf = opal_elog_create(OPAL_RC_CHIP_MASTER);
+ if (buf == NULL) {
+ printf("ELOG: Error getting buffer.\n");
+ return;
+ }
+
+ /* If the user wants to add multiple sections of various dump data
+ * for better debugging, data sections can be added using this interface:
+ * int opal_elog_update_user_dump(struct opal_errorlog *buf, unsigned char *data,
+ * uint32_t tag, uint16_t size)
+ */
+ /* tag -> unique ascii tag to identify a particular data dump section */
+ tag = 0x4b4b4b4b;
+ rc = opal_elog_update_user_dump(buf, data1, tag, sizeof(data1));
+ printf("ELOG: User data updated. rc : %d \n", rc);
+
+ tag = 0x4c4c4c4c;
+ rc = opal_elog_update_user_dump(buf, data2, tag, sizeof(data2));
+ printf("ELOG: User data updated. rc : %d \n", rc);
+
+ tag = 0x4d4d4d4d;
+ rc = opal_elog_update_user_dump(buf, data3, tag, sizeof(data3));
+ printf("ELOG: User data updated. rc : %d \n", rc);
+
+ /* Once all info is updated, ready to be sent to FSP */
+ printf("ELOG:commit to FSP\n");
+ rc = elog_fsp_commit(buf);
+ if (rc != 0)
+ printf("ELOG: Re-try error logging\n");
+}
+
+ Sample output PEL dump obtained from the FSP:
+ ============================================
+ $ errl -d -x 0x53D5EA83
+ | 00000000 50480030 01004000 20131126 05064700 PH.0..@. .....G. |
+ | 00000010 20131126 05064790 4B000109 00000000 .....G.K....... |
+ | 00000020 00000000 00000000 B0000003 53D5EA83 ............S... |
+ | 00000030 55480018 01004000 20000000 00000000 UH....@. ....... |
+ | 00000040 00002000 01005300 50530050 01004000 .. ...S.PS.P..@. |
+ | 00000050 02000008 00000048 00000080 00000000 .......H........ |
+ | 00000060 00000000 00000000 01234567 22220222 .........#Eg""." |
+ | 00000070 34560123 98768920 42423832 34303132 4V.#.v. BB824012 |
+ | 00000080 20202020 20202020 20202020 20202020 |
+ | 00000090 20202020 20202020 4548004C 01004000 EH.L..@. |
+ | 000000A0 38323436 2D4C3243 30363033 37374100 8246-L2C060377A. |
+ | 000000B0 00000000 00000000 00000000 00000000 ................ |
+ | 000000C0 00000000 00000000 00000000 00000000 ................ |
+ | 000000D0 00000000 00000000 00000000 05064700 ..............G. |
+ | 000000E0 00000000 4D54001C 01004000 38323436 ....MT....@.8246 |
+ | 000000F0 2D4C3243 30363033 37374100 00000000 -L2C060377A..... |
+ | 00000100 5544003C 01004000 4B4B4B4B 00340000 UD....@.KKKK.4.. |
+ | 00000110 54686973 20697320 61207361 6D706C65 This is a sample |
+ | 00000120 20757365 72206465 66696E65 64206461 user defined da |
+ | 00000130 74612073 65637469 6F6E3100 55440048 ta section1.UD.H |
+ | 00000140 01004000 4C4C4C4C 00400000 4572726F ..@.LLLL.@..Erro |
+ | 00000150 72206C6F 6767696E 67207361 6D706C65 r logging sample |
+ | 00000160 2E205468 65736520 61726520 64756D6D . These are dumm |
+ | 00000170 79206572 726F7273 2E205365 6374696F y errors. Sectio |
+ | 00000180 6E203200 55440071 01004000 4D4D4D4D n 2.UD.q..@.MMMM |
+ | 00000190 00690000 53616D70 6C652065 72726F72 .i..Sample error |
+ | 000001A0 2053616D 706C6520 6572726F 72205361 Sample error Sa |
+ | 000001B0 6D706C65 20657272 6F722053 616D706C mple error Sampl |
+ | 000001C0 65206572 726F7220 09090909 2053616D e error .... Sam |
+ | 000001D0 706C6520 6572726F 72206162 63646566 ple error abcdef |
+ | 000001E0 6768696A 6B6C6D6E 6F707172 73747576 ghijklmnopqrstuv |
+ | 000001F0 7778797A 00 wxyz. |
+ |------------------------------------------------------------------------------|
+ | Platform Event Log - 0x53D5EA83 |
+ |------------------------------------------------------------------------------|
+ | Private Header |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | Created at : 11/26/2013 05:06:47 |
+ | Committed at : 11/26/2013 05:06:47 |
+ | Creator Subsystem : Unknown - 0x0000004B |
+ | CSSVER : |
+ | Platform Log Id : 0xB0000003 |
+ | Entry Id : 0x53D5EA83 |
+ | Total Log Size : 644 |
+ |------------------------------------------------------------------------------|
+ | User Header |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Log Committed by : 4000 |
+ | Subsystem : Memory Subsystem |
+ | Event Scope : Unknown - 0x00000000 |
+ | Event Severity : Informational Event |
+ | Event Type : Not Applicable |
+ | Return Code : 0x00000000 |
+ | Action Flags : Report to Operating System |
+ | Action Status : Sent to Hypervisor |
+ |------------------------------------------------------------------------------|
+ | Primary System Reference Code |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | SRC Format : 0x80 |
+ | SRC Version : 0x02 |
+ | Virtual Progress SRC : False |
+ | I5/OS Service Event Bit : False |
+ | Hypervisor Dump Initiated: False |
+ | Power Control Net Fault : False |
+ | |
+ | Valid Word Count : 0x08 |
+ | Reference Code : BB824012 |
+ | Hex Words 2 - 5 : 00000080 00000000 00000000 00000000 |
+ | Hex Words 6 - 9 : 01234567 22220222 34560123 98768920 |
+ | |
+ |------------------------------------------------------------------------------|
+ | Extended User Header |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | Reporting Machine Type : 8246-L2C |
+ | Reporting Serial Number : 060377A |
+ | FW Released Ver : |
+ | FW SubSys Version : |
+ | Common Ref Time : 00/00/0000 05:06:47 |
+ | Symptom Id Len : 0 |
+ | Symptom Id : |
+ |------------------------------------------------------------------------------|
+ | Machine Type/Model & Serial Number |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | Machine Type Model : 8246-L2C |
+ | Serial Number : 060377A |
+ |------------------------------------------------------------------------------|
+ | User Defined Data |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | |
+ | 00000000 4B4B4B4B 00340000 54686973 20697320 KKKK.4..This is |
+ | 00000010 61207361 6D706C65 20757365 72206465 a sample user de |
+ | 00000020 66696E65 64206461 74612073 65637469 fined data secti |
+ | 00000030 6F6E3100 on1. |
+ | |
+ |------------------------------------------------------------------------------|
+ | User Defined Data |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | |
+ | 00000000 4C4C4C4C 00400000 4572726F 72206C6F LLLL.@..Error lo |
+ | 00000010 6767696E 67207361 6D706C65 2E205468 gging sample. Th |
+ | 00000020 65736520 61726520 64756D6D 79206572 ese are dummy er |
+ | 00000030 726F7273 2E205365 6374696F 6E203200 rors. Section 2. |
+ | |
+ |------------------------------------------------------------------------------|
+ | User Defined Data |
+ |------------------------------------------------------------------------------|
+ | Section Version : 1 |
+ | Sub-section type : 0 |
+ | Created by : 4000 |
+ | |
+ | 00000000 4D4D4D4D 00690000 53616D70 6C652065 MMMM.i..Sample e |
+ | 00000010 72726F72 2053616D 706C6520 6572726F rror Sample erro |
+ | 00000020 72205361 6D706C65 20657272 6F722053 r Sample error S |
+ | 00000030 616D706C 65206572 726F7220 09090909 ample error .... |
+ | 00000040 2053616D 706C6520 6572726F 72206162 Sample error ab |
+ | 00000050 63646566 6768696A 6B6C6D6E 6F707172 cdefghijklmnopqr |
+ | 00000060 73747576 7778797A 00 stuvwxyz. |
+ | |
+ |------------------------------------------------------------------------------|
+
diff --git a/doc/overview.txt b/doc/overview.txt
new file mode 100644
index 0000000..760f391
--- /dev/null
+++ b/doc/overview.txt
@@ -0,0 +1,116 @@
+skiboot overview
+================
+
+skiboot is firmware, loaded by the FSP. Along with loading the bootloader,
+it provides some runtime services to the OS (typically Linux).
+
+Source layout
+-------------
+asm/ small amount, mainly entry points
+ccan/ bits from CCAN
+core/ common code among machines.
+doc/ not enough here
+external/ tools that run outside of sapphire.
+hdata/ all stuff going to/from FSP
+hw/ drivers for things & fsp things.
+include/ headers!
+libc/ tiny libc, from SLOF
+libfdt/ straight device tree lib
+libpore/ to manipulate PORE engine.
+
+We have a spinlock implementation in asm/lock.S
+Entry points are detailed in asm/head.S
+The main C entry point is in core/init.c: main_cpu_entry()
+
+Binaries
+--------
+The following binaries are built:
+
+skiboot.lid: the actual lid (a binary image extracted from the ELF file)
+skiboot.elf: the ELF binary; the lid comes from this
+skiboot.map: plain map of symbols
+
+Booting
+-------
+
+On boot, every thread of execution jumps to a single entry point in skiboot
+so we need to do some magic to ensure we init things properly and don't stomp
+on each other. We choose a master thread, putting everybody else into a
+spinloop.
+
+Essentially, we do this by doing an atomic fetch and inc and whoever gets 0
+gets to be the master.
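+
+A minimal sketch of that election in C (illustrative only: boot_count and
+secondary_wait_loop are made-up names, not skiboot's actual symbols;
+main_cpu_entry() is real):
+
+    static unsigned long boot_count;
+
+    void common_entry(void)
+    {
+        /* Atomic fetch-and-inc: whoever reads 0 becomes the master */
+        if (__sync_fetch_and_add(&boot_count, 1) == 0)
+            main_cpu_entry();      /* master: do the real init */
+        else
+            secondary_wait_loop(); /* everybody else spins */
+    }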
+
+When we enter skiboot we also get a memory location in a register which
+is the location of a device tree for the system. We fatten out the device
+tree, turning offsets into real pointers and manipulating it where needed.
+We re-flatten the device tree before booting the OS (Linux).
+
+The main entry point is main_cpu_entry() in core/init.c; this is a carefully
+ordered init of things. The sequence is relatively well documented there.
+
+OS interface
+------------
+
+Skiboot maintains its own stack for each CPU. We do not have an ABI like
+"may use X bytes of OS stack"; we keep entirely to our own stack space.
+The OS (Linux) calling skiboot will never use any OS stack space, and the OS
+does not need to call skiboot with a valid stack.
+
+We define an array of stacks, one for each CPU. On entry to skiboot,
+we can find our stack by multiplying our CPU number by the stack size and
+adding that to the address of the stack area.
+
+At the bottom of each stack area is a per CPU data structure, which we
+can get to by chopping off the LSBs of the stack pointer.
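+
+A minimal sketch of those two lookups (illustrative only: the real constants,
+names and per-CPU structure layout live in skiboot's headers, and this assumes
+the stack area is STACK_SIZE aligned):
+
+    #define STACK_SIZE 0x4000UL /* 16k per-CPU stack */
+
+    extern char stack_area[];   /* made-up name for the stack array */
+
+    /* Our stack starts cpu_num * STACK_SIZE into the stack area */
+    static inline void *my_stack_base(unsigned long cpu_num)
+    {
+        return stack_area + cpu_num * STACK_SIZE;
+    }
+
+    /* The per-CPU data sits at the bottom of our stack, so chopping
+     * the LSBs off any stack address inside it gets us there */
+    static inline struct cpu_thread *my_cpu_data(unsigned long sp)
+    {
+        return (struct cpu_thread *)(sp & ~(STACK_SIZE - 1));
+    }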
+
+The OPAL interface is a generic message queue. The Linux side of things
+can be found in linux/arch/powerpc/platforms/powernv/opal-*.c
+
+Interrupts
+----------
+
+We don't handle interrupts in skiboot.
+
+In the future we may have to change to process machine check interrupts
+during boot.
+
+We do not have timer interrupts.
+
+
+Memory
+------
+
+We initially occupy a chunk of memory, "heap". We pass to the OS (Linux)
+a reservation of what we occupy (including stacks).
+
+In the source file include/config.h we include a memory map. This is
+manually generated, not automatically generated.
+
+We use CCAN for a bunch of helper code, turning on things like DEBUG_LOCKS
+and DEBUG_MALLOC as these are not a performance issue for us, and we like
+to be careful.
+
+In include/config.h there are defines for turning on extra tracing.
+OPAL is what we name the interface from skiboot to OS (Linux).
+
+Each CPU gets a 16k stack, which is probably more than enough. Stack
+should be used sparingly though.
+
+Important memory locations:
+
+SKIBOOT_BASE - where we sit
+
+HEAP_BASE,
+HEAP_SIZE - the location and size for heap. We reserve 4MB for
+ initial allocations.
+
+There is also SKIBOOT_SIZE (manually calculated) and DEVICE_TREE_MAX_SIZE,
+which is largely historical.
+
+Skiboot log
+-----------
+
+There is a circular log buffer that skiboot maintains. This can be
+accessed either from the FSP or through /dev/mem or through a debugfs
+patch that's currently floating around.
diff --git a/doc/pci-slot-properties.txt b/doc/pci-slot-properties.txt
new file mode 100644
index 0000000..7aeac3f
--- /dev/null
+++ b/doc/pci-slot-properties.txt
@@ -0,0 +1,17 @@
+
+PCI Slot Properties Description
+===============================
+
+The following properties have been added to the PCI Device Tree Node
+for the PCI Slot:
+
+ibm,slot-location-code System location code string for the slot connector
+ibm,slot-pluggable Boolean indicating whether the slot is pluggable
+ibm,slot-power-ctl Boolean indicating whether the slot has power control
+ibm,slot-wired-lanes The number of hardware lanes that are wired
+ibm,slot-connector-type The type of connector present
+ibm,slot-card-desc The height/length of the slot
+ibm,slot-card-mech Value indicating slot mechanicals and orientation
+ibm,slot-pwr-led-ctl Presence of slot power LED, and controlling entity
+ibm,slot-attn-led-ctl Presence of slot ATTN LED, and controlling entity
+
diff --git a/doc/vpd-properties.txt b/doc/vpd-properties.txt
new file mode 100644
index 0000000..ae18a7f
--- /dev/null
+++ b/doc/vpd-properties.txt
@@ -0,0 +1,19 @@
+VPD properties Description
+==========================
+
+/vpd : VPD root node
+Node name : <FRU description>@<Resource ID>
+ibm,vpd : VPD data (binary blob)
+ccin : Customer Card Identification Number
+fru-type : FRU type label (two byte ASCII Char)
+fru-number : FRU Stocking Part Number
+ibm,loc-code : Location code
+part-number : Part Number
+serial-number : Serial Number
+ibm,chip-id : Processor chip ID
+size : DIMM Size (applicable for DIMM VPD only)
+
+Child Node:
+===========
+A child node inherits its parent's VPD information except for the
+fru-type and location code.
diff --git a/doc/xscom-node-bindings.txt b/doc/xscom-node-bindings.txt
new file mode 100644
index 0000000..0c2545e
--- /dev/null
+++ b/doc/xscom-node-bindings.txt
@@ -0,0 +1,57 @@
+XSCOM regions
+=============
+
+The top-level xscom nodes specify the mapping range from the 64-bit address
+space into the PCB address space.
+
+There's one mapping range per chip xscom, therefore one node per mapping range.
+
+/
+/xscom@<chip-base-address-0>/
+/xscom@<chip-base-address-1>/
+…
+/xscom@<chip-base-address-n>/
+
+- where <chip-base-address-n> is the xscom base address with the gcid-specific
+ bits (for chip n) OR-ed in.
+
+Each xscom node has the following properties:
+
+ * #address-cells = 1
+ * #size-cells = 1
+ * reg = <base-address[#parent-address-cells] size[#parent-size-cells]>
+ * ibm,chip-id = gcid
+ * compatible = "ibm,xscom", "ibm,power8-xscom" / "ibm,power7-xscom"
+
+
+Chiplet endpoints
+=================
+
+One sub-node per endpoint. Endpoints are defined by their (port,
+endpoint-address) data on the PCB, and are named according to their endpoint
+types:
+
+/xscom@<chip-base-address>/
+/xscom@<chip-base-address>/chiptod@<endpoint-addr>
+/xscom@<chip-base-address>/lpc@<endpoint-addr>
+
+- where the <endpoint-addr> is a single address (as distinct from the current
+ (gcid,base) format), consisting of the SCOM port and SCOM endpoint bits in
+ their 31-bit address format.
+
+Each endpoint node has the following properties:
+
+ * reg = <endpoint-address[#parent-address-cells] size[#parent-size-cells]>
+ * compatible - depends on endpoint type, eg "ibm,power8-chiptod"
+
+The endpoint address specifies the address on the PCB. So, to calculate the
+MMIO address for a PCB register:
+
+ mmio_addr = <xscom-base-addr> | (pcb_addr[1:27] << 4)
+ | (pcb_addr[28:31] << 3)
+
+Where:
+
+ - xscom-base-addr is the address from the first two cells of the parent
+ node's reg property
+ - pcb_addr is the first cell of the endpoint's reg property
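+
+Expressed as a small C helper (a sketch of the formula above, keeping each
+field in its in-word position before shifting; not necessarily skiboot's
+actual implementation):
+
+    uint64_t pcb_to_mmio(uint64_t xscom_base_addr, uint32_t pcb_addr)
+    {
+        uint64_t mmio = xscom_base_addr;
+
+        mmio |= ((uint64_t)pcb_addr & ~0xful) << 4; /* pcb_addr[1:27] */
+        mmio |= ((uint64_t)pcb_addr & 0xful) << 3;  /* pcb_addr[28:31] */
+        return mmio;
+    }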
diff --git a/external/Makefile b/external/Makefile
new file mode 100644
index 0000000..b4b0091
--- /dev/null
+++ b/external/Makefile
@@ -0,0 +1,7 @@
+HOSTEND=$(shell uname -m | sed -e 's/^i.*86$$/LITTLE/' -e 's/^x86.*/LITTLE/' -e 's/^ppc.*/BIG/')
+CFLAGS=-g -Wall -DHAVE_$(HOSTEND)_ENDIAN -I../include
+
+dump_trace: dump_trace.c
+
+clean:
+ rm -f dump_trace *.o
diff --git a/external/dump_trace.c b/external/dump_trace.c
new file mode 100644
index 0000000..9364601
--- /dev/null
+++ b/external/dump_trace.c
@@ -0,0 +1,197 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <err.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <unistd.h>
+
+#include "../ccan/endian/endian.h"
+#include "../ccan/short_types/short_types.h"
+#include <trace_types.h>
+
+/* Handles trace from debugfs (one record at a time) or file */
+static bool get_trace(int fd, union trace *t, int *len)
+{
+ void *dest = t;
+ int r;
+
+ /* Move down any extra we read last time. */
+ if (*len >= sizeof(t->hdr) && *len >= t->hdr.len_div_8 * 8) {
+ u8 rlen = t->hdr.len_div_8 * 8;
+ memmove(dest, dest + rlen, *len - rlen);
+ *len -= rlen;
+ }
+
+ r = read(fd, dest + *len, sizeof(*t) - *len);
+ if (r < 0)
+ return false;
+
+ *len += r;
+ /* We should have a complete record. */
+ return *len >= sizeof(t->hdr) && *len >= t->hdr.len_div_8 * 8;
+}
+
+static void display_header(const struct trace_hdr *h)
+{
+ static u64 prev_ts;
+ u64 ts = be64_to_cpu(h->timestamp);
+
+ printf("%16lx (+%8lx) [%03x] : ",
+ ts, prev_ts ? (ts - prev_ts) : 0, h->cpu);
+ prev_ts = ts;
+}
+
+static void dump_fsp_event(struct trace_fsp_event *t)
+{
+ printf("FSP_EVT [st=%d] ", t->fsp_state);
+
+ switch(t->event) {
+ case TRACE_FSP_EVT_LINK_DOWN:
+ printf("LINK DOWN");
+ break;
+ case TRACE_FSP_EVT_DISR_CHG:
+ printf("DISR CHANGE (0x%08x)", t->data[0]);
+ break;
+ case TRACE_FSP_EVT_SOFT_RR:
+ printf("SOFT R&R (DISR=0x%08x)", t->data[0]);
+ break;
+ case TRACE_FSP_EVT_RR_COMPL:
+ printf("R&R COMPLETE");
+ break;
+ case TRACE_FSP_EVT_HDES_CHG:
+ printf("HDES CHANGE (0x%08x)", t->data[0]);
+ break;
+ case TRACE_FSP_EVT_POLL_IRQ:
+ printf("%s HDIR=%08x CTL=%08x PSI_IRQ=%d",
+ t->data[0] ? "IRQ " : "POLL", t->data[1],
+ t->data[2], t->data[3]);
+ break;
+ default:
+ printf("Unknown %d (d: %08x %08x %08x %08x)",
+ t->event, t->data[0], t->data[1],
+ t->data[2], t->data[3]);
+ }
+ printf("\n");
+}
+
+static void dump_opal_call(struct trace_opal *t)
+{
+ unsigned int i, n;
+
+ printf("OPAL CALL %"PRIu64, be64_to_cpu(t->token));
+ printf(" LR=0x%016"PRIx64" SP=0x%016"PRIx64,
+ be64_to_cpu(t->lr), be64_to_cpu(t->sp));
+ n = (t->hdr.len_div_8 * 8 - offsetof(union trace, opal.r3_to_11))
+ / sizeof(u64);
+ for (i = 0; i < n; i++)
+ printf(" R%u=0x%016"PRIx64,
+ i+3, be64_to_cpu(t->r3_to_11[i]));
+ printf("\n");
+}
+
+static void dump_fsp_msg(struct trace_fsp_msg *t)
+{
+ unsigned int i;
+
+ printf("FSP_MSG: CMD %u SEQ %u MOD %u SUB %u DLEN %u %s [",
+ be32_to_cpu(t->word0) & 0xFFFF,
+ be32_to_cpu(t->word0) >> 16,
+ be32_to_cpu(t->word1) >> 8,
+ be32_to_cpu(t->word1) & 0xFF,
+ t->dlen,
+ t->dir == TRACE_FSP_MSG_IN ? "IN" :
+ (t->dir == TRACE_FSP_MSG_OUT ? "OUT" : "UNKNOWN"));
+
+ for (i = 0; i < t->dlen; i++)
+ printf("%s%02x", i ? " " : "", t->data[i]);
+ printf("]\n");
+}
+
+static void dump_uart(struct trace_uart *t)
+{
+ switch(t->ctx) {
+ case TRACE_UART_CTX_IRQ:
+ printf(": IRQ IRQEN=%d IN_CNT=%d\n",
+ !t->irq_state, t->in_count);
+ break;
+ case TRACE_UART_CTX_POLL:
+ printf(": POLL IRQEN=%d IN_CNT=%d\n",
+ !t->irq_state, t->in_count);
+ break;
+ case TRACE_UART_CTX_READ:
+ printf(": READ IRQEN=%d IN_CNT=%d READ=%d\n",
+ !t->irq_state, t->in_count, t->cnt);
+ break;
+ default:
+ printf(": ???? IRQEN=%d IN_CNT=%d\n",
+ !t->irq_state, t->in_count);
+ break;
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int fd, len = 0;
+ union trace t;
+ const char *in = "/sys/kernel/debug/powerpc/opal-trace";
+
+ if (argc > 2)
+ errx(1, "Usage: dump_trace [file]");
+
+ if (argv[1])
+ in = argv[1];
+ fd = open(in, O_RDONLY);
+ if (fd < 0)
+ err(1, "Opening %s", in);
+
+ while (get_trace(fd, &t, &len)) {
+ display_header(&t.hdr);
+ switch (t.hdr.type) {
+ case TRACE_REPEAT:
+ printf("REPEATS: %u times\n",
+ be32_to_cpu(t.repeat.num));
+ break;
+ case TRACE_OVERFLOW:
+ printf("**OVERFLOW**: %"PRIu64" bytes missed\n",
+ be64_to_cpu(t.overflow.bytes_missed));
+ break;
+ case TRACE_OPAL:
+ dump_opal_call(&t.opal);
+ break;
+ case TRACE_FSP_MSG:
+ dump_fsp_msg(&t.fsp_msg);
+ break;
+ case TRACE_FSP_EVENT:
+ dump_fsp_event(&t.fsp_evt);
+ break;
+ case TRACE_UART:
+ dump_uart(&t.uart);
+ break;
+ default:
+ printf("UNKNOWN(%u) CPU %u length %u\n",
+ t.hdr.type, be16_to_cpu(t.hdr.cpu),
+ t.hdr.len_div_8 * 8);
+ }
+ }
+ return 0;
+}
diff --git a/external/trace.c b/external/trace.c
new file mode 100644
index 0000000..43ed19b
--- /dev/null
+++ b/external/trace.c
@@ -0,0 +1,105 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* This example code shows how to read from the trace buffer. */
+#include <external/trace.h>
+#include <trace_types.h>
+#include <errno.h>
+
+bool trace_empty(const struct tracebuf *tb)
+{
+ const struct trace_repeat *rep;
+
+ if (tb->rpos == tb->end)
+ return true;
+
+ /*
+ * If we have a single element only, and it's a repeat buffer
+ * we've already seen every repeat for (yet which may be
+ * incremented in future), we're also empty.
+ */
+ rep = (void *)tb->buf + (tb->rpos & tb->mask);
+ if (tb->end != tb->rpos + sizeof(*rep))
+ return false;
+
+ if (rep->type != TRACE_REPEAT)
+ return false;
+
+ if (rep->num != tb->last_repeat)
+ return false;
+
+ return true;
+}
+
+/* You can't read in parallel, so some locking required in caller. */
+bool trace_get(union trace *t, struct tracebuf *tb)
+{
+ u64 start;
+ size_t len = sizeof(*t) < tb->max_size ? sizeof(*t) : tb->max_size;
+
+ if (trace_empty(tb))
+ return false;
+
+again:
+ /*
+ * The actual buffer is slightly larger than tbsize, so this
+ * memcpy is always valid.
+ */
+ memcpy(t, tb->buf + (tb->rpos & tb->mask), len);
+
+ rmb(); /* read barrier, so we read tb->start after copying record. */
+
+ start = tb->start;
+
+ /* Now, was that overwritten? */
+ if (tb->rpos < start) {
+ /* Create overflow record. */
+ t->overflow.unused64 = 0;
+ t->overflow.type = TRACE_OVERFLOW;
+ t->overflow.len_div_8 = sizeof(t->overflow) / 8;
+ t->overflow.bytes_missed = start - tb->rpos;
+ tb->rpos += t->overflow.bytes_missed;
+ return true;
+ }
+
+ /* Repeat entries need special handling */
+ if (t->hdr.type == TRACE_REPEAT) {
+ u32 num = t->repeat.num;
+
+ /* In case we've read some already... */
+ t->repeat.num -= tb->last_repeat;
+
+ /* Record how many repeats we saw this time. */
+ tb->last_repeat = num;
+
+ /* Don't report an empty repeat buffer. */
+ if (t->repeat.num == 0) {
+ /*
+ * This can't be the last buffer, otherwise
+ * trace_empty would have returned true.
+ */
+ assert(tb->end > tb->rpos + t->hdr.len_div_8 * 8);
+ /* Skip to next entry. */
+ tb->rpos += t->hdr.len_div_8 * 8;
+ tb->last_repeat = 0;
+ goto again;
+ }
+ } else {
+ tb->last_repeat = 0;
+ tb->rpos += t->hdr.len_div_8 * 8;
+ }
+
+ return true;
+}
diff --git a/external/trace.h b/external/trace.h
new file mode 100644
index 0000000..4d2dbc7
--- /dev/null
+++ b/external/trace.h
@@ -0,0 +1,20 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Is this tracebuf empty? */
+bool trace_empty(const struct tracebuf *tracebuf);
+
+/* Get the next trace from this buffer (false if empty). */
+bool trace_get(union trace *t, struct tracebuf *tb);
diff --git a/hdata/Makefile.inc b/hdata/Makefile.inc
new file mode 100644
index 0000000..44f8c86
--- /dev/null
+++ b/hdata/Makefile.inc
@@ -0,0 +1,8 @@
+# -*-Makefile-*-
+
+SUBDIRS += hdata
+HDATA_OBJS = spira.o paca.o pcia.o hdif.o memory.o fsp.o iohub.o vpd.o slca.o
+HDATA_OBJS += cpu-common.o vpd-common.o hostservices.o
+DEVSRC_OBJ = hdata/built-in.o
+
+$(DEVSRC_OBJ): $(HDATA_OBJS:%=hdata/%)
diff --git a/hdata/cpu-common.c b/hdata/cpu-common.c
new file mode 100644
index 0000000..f08fa27
--- /dev/null
+++ b/hdata/cpu-common.c
@@ -0,0 +1,280 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include "spira.h"
+#include <cpu.h>
+#include <ccan/str/str.h>
+#include <device.h>
+
+#include "hdata.h"
+
+struct dt_node * add_core_common(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ const struct sppaca_cpu_timebase *tb,
+ uint32_t int_server, bool okay)
+{
+ const char *name;
+ struct dt_node *cpu;
+ uint32_t version;
+ uint64_t freq;
+ const uint8_t pa_features[] = {
+ 6, 0, 0xf6, 0x3f, 0xc7, 0x00, 0x80, 0xc0 };
+
+ printf(" Cache: I=%u D=%u/%u/%u/%u\n",
+ be32_to_cpu(cache->icache_size_kb),
+ be32_to_cpu(cache->l1_dcache_size_kb),
+ be32_to_cpu(cache->l2_dcache_size_kb),
+ be32_to_cpu(cache->l3_dcache_size_kb),
+ be32_to_cpu(cache->l35_dcache_size_kb));
+
+ /* Use the boot CPU PVR to make up a CPU name in the device-tree
+ * since the HDAT doesn't seem to tell....
+ */
+ version = mfspr(SPR_PVR);
+ switch(PVR_TYPE(version)) {
+ case PVR_TYPE_P7:
+ name = "PowerPC,POWER7";
+ break;
+ case PVR_TYPE_P7P:
+ name = "PowerPC,POWER7+";
+ break;
+ case PVR_TYPE_P8E:
+ case PVR_TYPE_P8:
+ name = "PowerPC,POWER8";
+ break;
+ default:
+ name = "PowerPC,Unknown";
+ }
+
+ cpu = dt_new_addr(cpus, name, int_server);
+ assert(cpu);
+ dt_add_property_string(cpu, "device_type", "cpu");
+ dt_add_property_string(cpu, "status", okay ? "okay" : "bad");
+ dt_add_property_cells(cpu, "reg", int_server);
+ dt_add_property_cells(cpu, "cpu-version", version);
+ dt_add_property(cpu, "64-bit", NULL, 0);
+ dt_add_property(cpu, "32-64-bridge", NULL, 0);
+ dt_add_property(cpu, "graphics", NULL, 0);
+ dt_add_property(cpu, "general-purpose", NULL, 0);
+ dt_add_property_cells(cpu, "ibm,processor-segment-sizes",
+ 0x1c, 0x28, 0xffffffff, 0xffffffff);
+ dt_add_property_cells(cpu, "ibm,processor-page-sizes",
+ 0xc, 0x10, 0x18, 0x22);
+
+ /* Page size encodings appear to be the same for P7 and P8 */
+ dt_add_property_cells(cpu, "ibm,segment-page-sizes",
+ 0x0c, 0x000, 3, 0x0c, 0x0000, /* 4K seg 4k pages */
+ 0x10, 0x0007, /* 4K seg 64k pages */
+ 0x18, 0x0038, /* 4K seg 16M pages */
+ 0x10, 0x110, 2, 0x10, 0x0001, /* 64K seg 64k pages */
+ 0x18, 0x0008, /* 64K seg 16M pages */
+ 0x18, 0x100, 1, 0x18, 0x0000, /* 16M seg 16M pages */
+ 0x22, 0x120, 1, 0x22, 0x0003); /* 16G seg 16G pages */
+
+ dt_add_property(cpu, "ibm,pa-features",
+ pa_features, sizeof(pa_features));
+ dt_add_property_cells(cpu, "ibm,slb-size", 0x20);
+
+ dt_add_property_cells(cpu, "ibm,vmx", 0x2);
+ dt_add_property_cells(cpu, "ibm,dfp", 0x2);
+ dt_add_property_cells(cpu, "ibm,purr", 0x1);
+ dt_add_property_cells(cpu, "ibm,spurr", 0x1);
+
+ /*
+ * Do not create "clock-frequency" if the frequency doesn't
+ * fit in a single cell
+ */
+ freq = ((uint64_t)be32_to_cpu(tb->actual_clock_speed)) * 1000000ul;
+ if (freq <= 0xfffffffful)
+ dt_add_property_cells(cpu, "clock-frequency", freq);
+ dt_add_property_cells(cpu, "ibm,extended-clock-frequency",
+ hi32(freq), lo32(freq));
+
+ /* FIXME: Hardcoding is bad. */
+ dt_add_property_cells(cpu, "timebase-frequency", 512000000);
+ dt_add_property_cells(cpu, "ibm,extended-timebase-frequency",
+ 0, 512000000);
+
+ dt_add_property_cells(cpu, "reservation-granule-size",
+ be32_to_cpu(cache->reservation_size));
+
+ dt_add_property_cells(cpu, "d-tlb-size",
+ be32_to_cpu(cache->dtlb_entries));
+ dt_add_property_cells(cpu, "i-tlb-size",
+ be32_to_cpu(cache->itlb_entries));
+ /* Assume unified TLB */
+ dt_add_property_cells(cpu, "tlb-size",
+ be32_to_cpu(cache->dtlb_entries));
+ dt_add_property_cells(cpu, "d-tlb-sets",
+ be32_to_cpu(cache->dtlb_assoc_sets));
+ dt_add_property_cells(cpu, "i-tlb-sets",
+ be32_to_cpu(cache->itlb_assoc_sets));
+ dt_add_property_cells(cpu, "tlb-sets",
+ be32_to_cpu(cache->dtlb_assoc_sets));
+
+ dt_add_property_cells(cpu, "d-cache-block-size",
+ be32_to_cpu(cache->dcache_block_size));
+ dt_add_property_cells(cpu, "i-cache-block-size",
+ be32_to_cpu(cache->icache_block_size));
+ dt_add_property_cells(cpu, "d-cache-size",
+ be32_to_cpu(cache->l1_dcache_size_kb)*1024);
+ dt_add_property_cells(cpu, "i-cache-size",
+ be32_to_cpu(cache->icache_size_kb)*1024);
+ dt_add_property_cells(cpu, "i-cache-sets",
+ be32_to_cpu(cache->icache_assoc_sets));
+ dt_add_property_cells(cpu, "d-cache-sets",
+ be32_to_cpu(cache->dcache_assoc_sets));
+
+ if (cache->icache_line_size != cache->icache_block_size)
+ dt_add_property_cells(cpu, "i-cache-line-size",
+ be32_to_cpu(cache->icache_line_size));
+ if (cache->l1_dcache_line_size != cache->dcache_block_size)
+ dt_add_property_cells(cpu, "d-cache-line-size",
+ be32_to_cpu(cache->l1_dcache_line_size));
+ return cpu;
+}
+
+void add_core_attr(struct dt_node *cpu, uint32_t attr)
+{
+ if (attr & CPU_ATTR_UNIFIED_PL1)
+ dt_add_property(cpu, "cache-unified", NULL, 0);
+ if (attr & CPU_ATTR_SPLIT_TLB)
+ dt_add_property(cpu, "tlb-split", NULL, 0);
+ if (attr & CPU_ATTR_TLBIA)
+ dt_add_property(cpu, "tlbia", NULL, 0);
+ if (attr & CPU_ATTR_PERF_MONITOR)
+ dt_add_property_cells(cpu, "performance-monitor", 0, 1);
+ if (attr & CPU_ATTR_EXTERN_CONT)
+ dt_add_property(cpu, "external-control", NULL, 0);
+}
+
+static struct dt_node *create_cache_node(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ const char *name, uint32_t unit_addr,
+ int okay)
+{
+ struct dt_node *node;
+
+ node = dt_new_addr(cpus, name, unit_addr);
+ assert(node);
+
+ dt_add_property_string(node, "device_type", "cache");
+ dt_add_property_cells(node, "reg", unit_addr);
+ dt_add_property_string(node, "status", okay ? "okay" : "bad");
+ dt_add_property(node, "cache-unified", NULL, 0);
+
+ /* Assume the cache associativity sets are the same for L2, L3 and L3.5 */
+ dt_add_property_cells(node, "d-cache-sets",
+ be32_to_cpu(cache->l2_cache_assoc_sets));
+ dt_add_property_cells(node, "i-cache-sets",
+ be32_to_cpu(cache->l2_cache_assoc_sets));
+
+ return node;
+}
+
+static struct dt_node *l35_cache_node(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ uint32_t unit_addr, int okay)
+{
+ struct dt_node *node;
+
+ node = create_cache_node(cpus, cache, "l35-cache", unit_addr, okay);
+
+ dt_add_property_cells(node, "d-cache-size",
+ be32_to_cpu(cache->l35_dcache_size_kb) * 1024);
+ dt_add_property_cells(node, "i-cache-size",
+ be32_to_cpu(cache->l35_dcache_size_kb) * 1024);
+
+ if (cache->icache_line_size != cache->icache_block_size)
+ dt_add_property_cells(node, "i-cache-line-size",
+ be32_to_cpu(cache->icache_line_size));
+ if (cache->l35_cache_line_size != cache->dcache_block_size)
+ dt_add_property_cells(node, "d-cache-line-size",
+ be32_to_cpu(cache->l35_cache_line_size));
+
+ return node;
+}
+
+static struct dt_node *l3_cache_node(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ uint32_t unit_addr, int okay)
+{
+ struct dt_node *node;
+
+ node = create_cache_node(cpus, cache, "l3-cache", unit_addr, okay);
+
+ dt_add_property_cells(node, "d-cache-size",
+ be32_to_cpu(cache->l3_dcache_size_kb) * 1024);
+ dt_add_property_cells(node, "i-cache-size",
+ be32_to_cpu(cache->l3_dcache_size_kb) * 1024);
+
+ if (cache->icache_line_size != cache->icache_block_size)
+ dt_add_property_cells(node, "i-cache-line-size",
+ be32_to_cpu(cache->icache_line_size));
+ if (cache->l3_line_size != cache->dcache_block_size)
+ dt_add_property_cells(node, "d-cache-line-size",
+ be32_to_cpu(cache->l3_line_size));
+
+ return node;
+}
+
+static struct dt_node *l2_cache_node(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ uint32_t unit_addr, int okay)
+{
+ struct dt_node *node;
+
+ node = create_cache_node(cpus, cache, "l2-cache", unit_addr, okay);
+
+ dt_add_property_cells(node, "d-cache-size",
+ be32_to_cpu(cache->l2_dcache_size_kb) * 1024);
+ dt_add_property_cells(node, "i-cache-size",
+ be32_to_cpu(cache->l2_dcache_size_kb) * 1024);
+
+ if (cache->icache_line_size != cache->icache_block_size)
+ dt_add_property_cells(node, "i-cache-line-size",
+ be32_to_cpu(cache->icache_line_size));
+ if (cache->l2_line_size != cache->dcache_block_size)
+ dt_add_property_cells(node, "d-cache-line-size",
+ be32_to_cpu(cache->l2_line_size));
+
+ return node;
+}
+
+uint32_t add_core_cache_info(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ uint32_t int_server, int okay)
+{
+ struct dt_node *l2_node, *l3_node, *l35_node;
+ uint32_t unit_addr;
+
+ /* Use the Processor Interrupt Line to generate the cache unit address */
+ unit_addr = 0x20 << 24 | int_server;
+ l2_node = l2_cache_node(cpus, cache, unit_addr, okay);
+
+ unit_addr = 0x30 << 24 | int_server;
+ l3_node = l3_cache_node(cpus, cache, unit_addr, okay);
+ /* Represents the next level of cache in the memory hierarchy */
+ dt_add_property_cells(l2_node, "l2-cache", l3_node->phandle);
+
+ if (be32_to_cpu(cache->l35_dcache_size_kb)) {
+ unit_addr = 0x35 << 24 | int_server;
+ l35_node = l35_cache_node(cpus, cache, unit_addr, okay);
+ dt_add_property_cells(l3_node, "l2-cache", l35_node->phandle);
+ }
+
+ return l2_node->phandle;
+}
diff --git a/hdata/fsp.c b/hdata/fsp.c
new file mode 100644
index 0000000..cf6bc96
--- /dev/null
+++ b/hdata/fsp.c
@@ -0,0 +1,200 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <device.h>
+#include "spira.h"
+#include <cpu.h>
+#include <memory.h>
+#include <vpd.h>
+#include <ccan/str/str.h>
+#include <device_tree.h>
+#include <interrupts.h>
+
+#include "hdata.h"
+
+static struct dt_node *fsp_create_node(const void *spss, int i,
+ struct dt_node *parent)
+{
+ const struct spss_sp_impl *sp_impl;
+ struct dt_node *node;
+ unsigned int mask;
+
+ /* Find and check the SP Implementation structure */
+ sp_impl = HDIF_get_idata(spss, SPSS_IDATA_SP_IMPL, NULL);
+ if (!CHECK_SPPTR(sp_impl)) {
+ prerror("FSP #%d: SPSS/SP_Implementation not found !\n", i);
+ return NULL;
+ }
+
+ printf("FSP #%d: FSP HW version %d, SW version %d, chip DD%d.%d\n",
+ i, sp_impl->hw_version, sp_impl->sw_version,
+ sp_impl->chip_version >> 4, sp_impl->chip_version & 0xf);
+ mask = SPSS_SP_IMPL_FLAGS_INSTALLED | SPSS_SP_IMPL_FLAGS_FUNCTIONAL;
+ if ((be16_to_cpu(sp_impl->func_flags) & mask) != mask) {
+ prerror("FSP #%d: FSP not installed or not functional\n", i);
+ return NULL;
+ }
+
+ node = dt_new_addr(parent, "fsp", i);
+ assert(node);
+ dt_add_property_cells(node, "reg", i);
+
+ if (sp_impl->hw_version == 1) {
+ dt_add_property_strings(node, "compatible", "ibm,fsp",
+ "ibm,fsp1");
+ /* Offset into the FSP MMIO space where the mailbox
+ * registers are, as seen in the FSP1 spec */
+ dt_add_property_cells(node, "reg-offset", 0xb0016000);
+ } else if (sp_impl->hw_version == 2) {
+ dt_add_property_strings(node, "compatible", "ibm,fsp",
+ "ibm,fsp2");
+ dt_add_property_cells(node, "reg-offset", 0xb0011000);
+ }
+ dt_add_property_cells(node, "hw-version", sp_impl->hw_version);
+ dt_add_property_cells(node, "sw-version", sp_impl->sw_version);
+
+ if (be16_to_cpu(sp_impl->func_flags) & SPSS_SP_IMPL_FLAGS_PRIMARY)
+ dt_add_property(node, "primary", NULL, 0);
+
+ return node;
+}
+
+static uint32_t fsp_create_link(const struct spss_iopath *iopath, int index,
+ int fsp_index)
+{
+ struct dt_node *node;
+ const char *ststr;
+ bool current = false;
+ bool working = false;
+ uint32_t chip_id;
+
+ switch(iopath->psi.link_status) {
+ case SPSS_IO_PATH_PSI_LINK_BAD_FRU:
+ ststr = "Broken";
+ break;
+ case SPSS_IO_PATH_PSI_LINK_CURRENT:
+ ststr = "Active";
+ current = working = true;
+ break;
+ case SPSS_IO_PATH_PSI_LINK_BACKUP:
+ ststr = "Backup";
+ working = true;
+ break;
+ default:
+ ststr = "Unknown";
+ }
+ printf("FSP #%d: IO PATH %d is %s PSI Link, GXHB at %llx\n",
+ fsp_index, index, ststr, (long long)iopath->psi.gxhb_base);
+
+ chip_id = pcid_to_chip_id(iopath->psi.proc_chip_id);
+ node = dt_find_compatible_node_on_chip(dt_root, NULL, "ibm,psihb-x",
+ chip_id);
+ if (!node) {
+ prerror("FSP #%d: Can't find psihb node for link %d\n",
+ fsp_index, index);
+ } else {
+ if (current)
+ dt_add_property(node, "boot-link", NULL, 0);
+ dt_add_property_strings(node, "status", working ? "ok" : "bad");
+ }
+
+ return chip_id;
+}
+
+static void fsp_create_links(const void *spss, int index,
+ struct dt_node *fsp_node)
+{
+ uint32_t *links = NULL;
+ unsigned int i, lp, lcount = 0;
+ int count;
+
+ count = HDIF_get_iarray_size(spss, SPSS_IDATA_SP_IOPATH);
+ if (count < 0) {
+ prerror("FSP #%d: Can't find IO PATH array size !\n", index);
+ return;
+ }
+ printf("FSP #%d: Found %d IO PATH\n", index, count);
+
+ /* Iterate all links */
+ for (i = 0; i < count; i++) {
+ const struct spss_iopath *iopath;
+ unsigned int iopath_sz;
+ uint32_t chip;
+
+ iopath = HDIF_get_iarray_item(spss, SPSS_IDATA_SP_IOPATH,
+ i, &iopath_sz);
+ if (!CHECK_SPPTR(iopath)) {
+ prerror("FSP #%d: Can't find IO PATH %d\n", index, i);
+ break;
+ }
+ if (iopath->iopath_type != SPSS_IOPATH_TYPE_PSI) {
+ prerror("FSP #%d: Unsupported IO PATH %d type 0x%04x\n",
+ index, i, iopath->iopath_type);
+ continue;
+ }
+
+ chip = fsp_create_link(iopath, i, index);
+ lp = lcount++;
+ links = realloc(links, 4 * lcount);
+ links[lp] = chip;
+ }
+ if (links)
+ dt_add_property(fsp_node, "ibm,psi-links", links, lcount * 4);
+}
+
+void fsp_parse(void)
+{
+ const void *base_spss, *spss;
+ struct dt_node *fsp_root, *fsp_node;
+ int i;
+
+	/*
+	 * Note on DT representation of the PSI links and FSPs:
+	 *
+	 * We create an XSCOM node for each PSI host bridge (one per chip).
+	 * This is done in spira.c.
+	 *
+	 * We do not create the /psi MMIO variant at this stage, it will
+	 * be added by the psi driver in skiboot.
+	 *
+	 * We do not put the FSP(s) as children of these. Instead, we create
+	 * a top-level /fsps node with the FSPs as children.
+	 *
+	 * Each FSP then has an "ibm,psi-links" property which is an array
+	 * of the chip IDs it is connected to.
+	 */
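+
+	/*
+	 * For illustration only, the resulting subtree is expected to
+	 * look roughly like this (unit addresses and values below are
+	 * made-up examples, not taken from real HDAT):
+	 *
+	 *	/fsps
+	 *		#address-cells = <1>
+	 *		#size-cells = <0>
+	 *		fsp@0
+	 *			reg = <0>
+	 *			compatible = "ibm,fsp", "ibm,fsp1"
+	 *			ibm,psi-links = <chip0 chip1 ...>
+	 */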
+
+ /* Find SPSS in SPIRA */
+ base_spss = get_hdif(&spira.ntuples.sp_subsys, SPSS_HDIF_SIG);
+ if (!base_spss) {
+ printf("FSP: No SPSS in SPIRA !\n");
+ return;
+ }
+
+ fsp_root = dt_new(dt_root, "fsps");
+ assert(fsp_root);
+ dt_add_property_cells(fsp_root, "#address-cells", 1);
+ dt_add_property_cells(fsp_root, "#size-cells", 0);
+
+ /* Iterate FSPs in SPIRA */
+ for_each_ntuple_idx(&spira.ntuples.sp_subsys, spss, i, SPSS_HDIF_SIG) {
+ fsp_node = fsp_create_node(spss, i, fsp_root);
+ if (fsp_node)
+ fsp_create_links(spss, i, fsp_node);
+ }
+}
+
diff --git a/hdata/hdata.h b/hdata/hdata.h
new file mode 100644
index 0000000..b9b7721
--- /dev/null
+++ b/hdata/hdata.h
@@ -0,0 +1,49 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HDATA_H
+#define __HDATA_H
+
+struct dt_node;
+
+extern void paca_parse(void);
+extern bool pcia_parse(void);
+extern void fsp_parse(void);
+extern void io_parse(void);
+extern struct dt_node *dt_add_vpd_node(const struct HDIF_common_hdr *hdr,
+ int indx_fru, int indx_vpd);
+extern void vpd_parse(void);
+
+extern struct dt_node *find_xscom_for_chip(uint32_t chip_id);
+extern uint32_t pcid_to_chip_id(uint32_t proc_chip_id);
+
+extern struct dt_node *add_core_common(struct dt_node *cpus,
+ const struct sppaca_cpu_cache *cache,
+ const struct sppaca_cpu_timebase *tb,
+ uint32_t int_server, bool okay);
+extern void add_core_attr(struct dt_node *cpu, uint32_t attr);
+extern uint32_t add_core_cache_info(struct dt_node *cpus,
+ const struct sppcia_cpu_cache *cache,
+ uint32_t int_server, int okay);
+extern const struct slca_entry *slca_get_entry(uint16_t slca_index);
+extern const char *slca_get_vpd_name(uint16_t slca_index);
+extern const char *slca_get_loc_code_index(uint16_t slca_index);
+extern void slca_vpd_add_loc_code(struct dt_node *node, uint16_t slca_index);
+
+extern bool hservices_from_hdat(const void *fdt, size_t size);
+
+#endif /* __HDATA_H */
+
diff --git a/hdata/hdif.c b/hdata/hdif.c
new file mode 100644
index 0000000..916b4dd
--- /dev/null
+++ b/hdata/hdif.c
@@ -0,0 +1,140 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdif.h"
+
+const void *HDIF_get_idata(const struct HDIF_common_hdr *hdif, unsigned int di,
+ unsigned int *size)
+{
+ const struct HDIF_common_hdr *hdr = hdif;
+ const struct HDIF_idata_ptr *iptr;
+
+ if (hdr->d1f0 != BE16_TO_CPU(0xd1f0)) {
+ prerror("HDIF: Bad header format !\n");
+ return NULL;
+ }
+
+ if (di >= be16_to_cpu(hdr->idptr_count)) {
+ prerror("HDIF: idata index out of range !\n");
+ return NULL;
+ }
+
+ iptr = (void *)hdif + be32_to_cpu(hdr->idptr_off)
+ + di * sizeof(struct HDIF_idata_ptr);
+
+ if (size)
+ *size = be32_to_cpu(iptr->size);
+
+ return (void *)hdif + be32_to_cpu(iptr->offset);
+}
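+
+/*
+ * A minimal usage sketch (MY_IDATA and struct my_idata are hypothetical
+ * names, shown only to illustrate the calling convention):
+ *
+ *	unsigned int size;
+ *	const struct my_idata *p = HDIF_get_idata(hdif, MY_IDATA, &size);
+ *
+ *	if (p && size >= sizeof(*p))
+ *		consume(p);
+ *
+ * Note that HDIF_get_idata() only bounds-checks the index against
+ * idptr_count; validating the returned size is up to the caller.
+ */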
+
+const void *HDIF_get_iarray_item(const struct HDIF_common_hdr *hdif,
+ unsigned int di, unsigned int ai,
+ unsigned int *size)
+{
+ const struct HDIF_array_hdr *ahdr;
+ unsigned int asize;
+ const void *arr;
+
+ arr = HDIF_get_idata(hdif, di, &asize);
+ if (!arr)
+ return NULL;
+
+ if (asize < sizeof(struct HDIF_array_hdr)) {
+ prerror("HDIF: idata block too small for array !\n");
+ return NULL;
+ }
+
+ ahdr = arr;
+
+ if (ai >= be32_to_cpu(ahdr->ecnt)) {
+ prerror("HDIF: idata array index out of range !\n");
+ return NULL;
+ }
+
+ if (size)
+ *size = be32_to_cpu(ahdr->eactsz);
+
+ return arr + be32_to_cpu(ahdr->offset) + ai * be32_to_cpu(ahdr->esize);
+}
+
+int HDIF_get_iarray_size(const struct HDIF_common_hdr *hdif, unsigned int di)
+{
+ const struct HDIF_array_hdr *ahdr;
+ unsigned int asize;
+ const void *arr;
+
+ arr = HDIF_get_idata(hdif, di, &asize);
+ if (!arr)
+ return -1;
+
+ if (asize < sizeof(struct HDIF_array_hdr)) {
+ prerror("HDIF: idata block too small for array !\n");
+ return -1;
+ }
+
+ ahdr = arr;
+ return be32_to_cpu(ahdr->ecnt);
+}
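+
+/*
+ * Illustrative layout of an idata array block as decoded by the two
+ * helpers above (all offsets relative to the array header; a sketch,
+ * not the formal HDAT definition):
+ *
+ *	+0x00			struct HDIF_array_hdr
+ *	+offset			element 0 (eactsz valid bytes)
+ *	+offset + 1 * esize	element 1
+ *	+offset + n * esize	element n, for n < ecnt
+ */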
+
+struct HDIF_child_ptr *
+HDIF_child_arr(const struct HDIF_common_hdr *hdif, unsigned int idx)
+{
+ struct HDIF_child_ptr *children;
+
+ children = (void *)hdif + be32_to_cpu(hdif->child_off);
+
+ if (idx >= be16_to_cpu(hdif->child_count)) {
+ prerror("HDIF: child array idx out of range!\n");
+ return NULL;
+ }
+
+ return &children[idx];
+}
+
+struct HDIF_common_hdr *HDIF_child(const struct HDIF_common_hdr *hdif,
+ const struct HDIF_child_ptr *child,
+ unsigned int idx,
+ const char *eyecatcher)
+{
+ void *base = (void *)hdif;
+ struct HDIF_common_hdr *ret;
+ long child_off;
+
+ /* child must be in hdif's child array */
+ child_off = (void *)child - (base + be32_to_cpu(hdif->child_off));
+ assert(child_off % sizeof(struct HDIF_child_ptr) == 0);
+ assert(child_off / sizeof(struct HDIF_child_ptr)
+ < be16_to_cpu(hdif->child_count));
+
+ assert(idx < be32_to_cpu(child->count));
+
+ if (be32_to_cpu(child->size) < sizeof(struct HDIF_common_hdr)) {
+ prerror("HDIF: %s child #%i too small: %u\n",
+ eyecatcher, idx, be32_to_cpu(child->size));
+ return NULL;
+ }
+
+ ret = base + be32_to_cpu(child->offset)
+ + be32_to_cpu(child->size) * idx;
+ if (!HDIF_check(ret, eyecatcher)) {
+ prerror("HDIF: %s child #%i bad type\n",
+ eyecatcher, idx);
+ return NULL;
+ }
+
+ return ret;
+}
diff --git a/hdata/hdif.h b/hdata/hdif.h
new file mode 100644
index 0000000..fad7454
--- /dev/null
+++ b/hdata/hdif.h
@@ -0,0 +1,141 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HDIF_H
+#define __HDIF_H
+
+#include <skiboot.h>
+#include <types.h>
+#include <ccan/endian/endian.h>
+
+struct HDIF_common_hdr {
+ __be16 d1f0; /* 0xd1f0 */
+ char id[6]; /* eye catcher string */
+ __be16 instnum; /* instance number */
+ __be16 version; /* version */
+ __be32 total_len; /* total structure length */
+ __be32 hdr_len; /* header length (currently 0x20) */
+ __be32 idptr_off; /* offset to idata pointers */
+ __be16 idptr_count; /* number of idata pointers */
+ __be16 child_count; /* number of child structures */
+ __be32 child_off; /* offset to child structures array */
+} __packed __align(0x10);
+
+struct HDIF_idata_ptr {
+ __be32 offset;
+ __be32 size;
+} __packed __align(0x8);
+
+struct HDIF_array_hdr {
+ __be32 offset;
+ __be32 ecnt;
+ __be32 esize;
+ __be32 eactsz;
+} __packed __align(0x10);
+
+struct HDIF_child_ptr {
+ __be32 offset;
+ __be32 size;
+ __be32 count;
+} __packed;
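+
+/*
+ * Rough overall layout of an HDIF structure, pieced together from the
+ * types above (a sketch, not the formal specification):
+ *
+ *	+0x00		struct HDIF_common_hdr (0xd1f0 magic + eye catcher)
+ *	+idptr_off	struct HDIF_idata_ptr[idptr_count]
+ *	+child_off	struct HDIF_child_ptr[child_count]
+ *
+ * The offset fields in the idata and child pointers are relative to
+ * the start of the HDIF structure itself.
+ */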
+
+#define HDIF_HDR_LEN (sizeof(struct HDIF_common_hdr))
+#define HDIF_ARRAY_OFFSET (sizeof(struct HDIF_array_hdr))
+
+#define HDIF_ID(_id) .d1f0 = CPU_TO_BE16(0xd1f0), .id = _id
+
+#define HDIF_SIMPLE_HDR(id, vers, type) \
+{ \
+ HDIF_ID(id), \
+ .instnum = CPU_TO_BE16(0), \
+ .version = CPU_TO_BE16(vers), \
+ .total_len = CPU_TO_BE32(sizeof(type)), \
+ .hdr_len = CPU_TO_BE32(HDIF_HDR_LEN), \
+ .idptr_off = CPU_TO_BE32(HDIF_HDR_LEN), \
+ .idptr_count = CPU_TO_BE16(1), \
+ .child_count = CPU_TO_BE16(0), \
+ .child_off = CPU_TO_BE32(0), \
+}
+
+#define HDIF_IDATA_PTR(_offset, _size) \
+{ \
+ .offset = CPU_TO_BE32(_offset), \
+	.size = CPU_TO_BE32(_size),				\
+}
+
+static inline bool HDIF_check(const void *hdif, const char id[])
+{
+ const struct HDIF_common_hdr *hdr = hdif;
+
+ return hdr->d1f0 == CPU_TO_BE16(0xd1f0) &&
+ memcmp(hdr->id, id, sizeof(hdr->id)) == 0;
+}
+
+/* HDIF_get_idata - Get a pointer to internal data block
+ *
+ * @hdif : HDIF structure pointer
+ * @di : Index of the idata pointer
+ * @size : Return the data size (or NULL if ignored)
+ */
+extern const void *HDIF_get_idata(const struct HDIF_common_hdr *hdif,
+ unsigned int di,
+ unsigned int *size);
+
+/* HDIF_get_iarray_item - Get a pointer to an element of an idata array
+ *
+ * @hdif : HDIF structure pointer
+ * @di : Index of the idata pointer
+ * @ai : Index in the resulting array
+ * @size	: Return the entry's actual size (or NULL if ignored)
+ */
+extern const void *HDIF_get_iarray_item(const struct HDIF_common_hdr *hdif,
+ unsigned int di,
+ unsigned int ai, unsigned int *size);
+
+/* HDIF_get_iarray_size - Get the number of elements of an internal data array
+ *
+ * @hdif : HDIF structure pointer
+ * @di : Index of the idata pointer
+ *
+ * A negative result means an error
+ */
+extern int HDIF_get_iarray_size(const struct HDIF_common_hdr *hdif,
+ unsigned int di);
+
+/* HDIF_child_arr - Get a child array from this HDIF.
+ *
+ * @hdif : HDIF structure pointer
+ * @idx : the child to get
+ *
+ * NULL means an error (not that many children).
+ */
+extern struct HDIF_child_ptr *
+HDIF_child_arr(const struct HDIF_common_hdr *hdif, unsigned int idx);
+
+/* HDIF_child - Deref a child_ptr entry.
+ *
+ * @hdif : HDIF structure pointer
+ * @child : the child returned from HDIF_child_arr
+ * @idx : the index of the child to get (< child->count).
+ * @eyecatcher: the 6-char ID expected for this child.
+ *
+ * NULL means an error.
+ */
+extern struct HDIF_common_hdr *HDIF_child(const struct HDIF_common_hdr *hdif,
+ const struct HDIF_child_ptr *child,
+ unsigned int idx,
+ const char *eyecatcher);
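+
+/*
+ * Typical traversal, sketched with hypothetical names (MY_CHILD_IDX
+ * and the "MYKID " eye catcher are illustrative only):
+ *
+ *	const struct HDIF_child_ptr *arr;
+ *	const struct HDIF_common_hdr *kid;
+ *
+ *	arr = HDIF_child_arr(hdif, MY_CHILD_IDX);
+ *	if (arr && be32_to_cpu(arr->count) > 0)
+ *		kid = HDIF_child(hdif, arr, 0, "MYKID ");
+ */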
+#endif /* __HDIF_H */
diff --git a/hdata/hostservices.c b/hdata/hostservices.c
new file mode 100644
index 0000000..ff2edbe
--- /dev/null
+++ b/hdata/hostservices.c
@@ -0,0 +1,96 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <libfdt/libfdt.h>
+#include <skiboot.h>
+#include <device.h>
+#include <compiler.h>
+#include <hostservices.h>
+
+#include "spira.h"
+#include "hdata.h"
+
+static void merge_property(const struct dt_node *src_root,
+ struct dt_node *dst_root,
+ const char *name)
+{
+ const struct dt_property *src;
+ struct dt_property *dst;
+
+	/* Nothing to merge if the source property doesn't exist. */
+ src = dt_find_property(src_root, name);
+ if (!src)
+ return;
+
+ /* Just create a new one if there's none in dst. */
+ dst = __dt_find_property(dst_root, name);
+ if (!dst) {
+ dt_add_property(dst_root, name, src->prop, src->len);
+ return;
+ }
+
+ /* Append src to dst. */
+ dt_resize_property(&dst, dst->len + src->len);
+ memcpy(dst->prop + dst->len, src->prop, src->len);
+ dst->len += src->len;
+}
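+
+/*
+ * Example with made-up contents: if dst_root already has
+ * reserved-ranges = <A B> and the mini-DT provides <C D>, the merged
+ * property becomes <A B C D>. Both sides are opaque byte strings to
+ * us, so a plain append is sufficient.
+ */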
+
+static void hservice_parse_dt_tree(const struct dt_node *src)
+{
+ const struct dt_property *sprop;
+
+ /* Copy/merge reserved names & ranges properties. */
+ list_for_each(&src->properties, sprop, list) {
+ if (streq(sprop->name, "reserved-names") ||
+ streq(sprop->name, "reserved-ranges") ||
+ streq(sprop->name, "ibm,enabled-idle-states"))
+ merge_property(src, dt_root, sprop->name);
+ }
+}
+
+/* Get host services information from hdat. */
+bool hservices_from_hdat(const void *fdt, size_t size)
+{
+ int err;
+ struct dt_node *hservices;
+
+ printf("HBRT: Found mini-DT at 0x%p size: 0x%08lx\n", fdt, size);
+
+ /* For diagnostic purposes, we copy the whole blob over */
+ dt_add_property(dt_root, "ibm,hbrt-mini-fdt", fdt, size);
+
+ /* Parse & extract relevant properties */
+ err = fdt_check_header(fdt);
+ if (err) {
+ prerror("HBRT: fdt blob %p hdr error %d\n", fdt, err);
+ return false;
+ }
+
+ hservices = dt_new_root("ibm,hostservices");
+ err = dt_expand_node(hservices, fdt, 0);
+ if (err < 0) {
+ prerror("HBRT: fdt blob %p parse error %d\n", fdt, err);
+ return false;
+ }
+
+ hservice_parse_dt_tree(hservices);
+ return true;
+}
+
diff --git a/hdata/iohub.c b/hdata/iohub.c
new file mode 100644
index 0000000..0dbec36
--- /dev/null
+++ b/hdata/iohub.c
@@ -0,0 +1,715 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include "spira.h"
+#include <cpu.h>
+#include <fsp.h>
+#include <opal.h>
+#include <ccan/str/str.h>
+#include <ccan/array_size/array_size.h>
+#include <device.h>
+#include <p5ioc2.h>
+#include <p7ioc.h>
+#include <vpd.h>
+
+#include "hdata.h"
+
+static void io_add_common(struct dt_node *hn, const struct cechub_io_hub *hub)
+{
+ dt_add_property_cells(hn, "#address-cells", 2);
+ dt_add_property_cells(hn, "#size-cells", 2);
+ dt_add_property_cells(hn, "ibm,buid-ext", hub->buid_ext);
+ dt_add_property_cells(hn, "ibm,chip-id",
+ pcid_to_chip_id(hub->proc_chip_id));
+ dt_add_property_cells(hn, "ibm,gx-index", hub->gx_index);
+ dt_add_property_cells(hn, "revision", hub->ec_level);
+
+	/* Instead of exposing the GX BARs as separate ranges as we *should*
+	 * do in an ideal world, we just create an empty pass-through
+	 * "ranges" property and use separate properties for the BARs.
+	 *
+	 * This is hackish but will do for now, and it avoids having to
+	 * parse an overly complex ranges property.
+	 */
+ dt_add_property(hn, "ranges", NULL, 0);
+ dt_add_property_cells(hn, "ibm,gx-bar-1",
+ hi32(hub->gx_ctrl_bar1), lo32(hub->gx_ctrl_bar1));
+ dt_add_property_cells(hn, "ibm,gx-bar-2",
+ hi32(hub->gx_ctrl_bar2), lo32(hub->gx_ctrl_bar2));
+
+ /* Add presence detect if valid */
+ if (hub->flags & CECHUB_HUB_FLAG_FAB_BR0_PDT)
+ dt_add_property_cells(hn, "ibm,br0-presence-detect",
+ hub->fab_br0_pdt);
+ if (hub->flags & CECHUB_HUB_FLAG_FAB_BR1_PDT)
+ dt_add_property_cells(hn, "ibm,br1-presence-detect",
+ hub->fab_br1_pdt);
+}
+
+static bool io_get_lx_info(const void *kwvpd, unsigned int kwvpd_sz,
+ int lx_idx, struct dt_node *hn)
+{
+ const void *lxr;
+ char recname[5];
+
+	/* Find LXRn, where n is the index passed in: bumping the last
+	 * character of "LXR0" turns it into "LXR1" and so on */
+ strcpy(recname, "LXR0");
+ recname[3] += lx_idx;
+ lxr = vpd_find(kwvpd, kwvpd_sz, recname, "LX", NULL);
+ if (!lxr) {
+ /* Not found, try VINI */
+ lxr = vpd_find(kwvpd, kwvpd_sz, "VINI",
+ "LX", NULL);
+ if (lxr)
+ lx_idx = VPD_LOAD_LXRN_VINI;
+ }
+ if (!lxr) {
+ printf("CEC: LXR%x not found !\n", lx_idx);
+ return false;
+ }
+
+ printf("CEC: LXRn=%d LXR=%016lx\n", lx_idx,
+ lxr ? *(unsigned long *)lxr : 0);
+ printf("CEC: LX Info added to %llx\n", (long long)hn);
+
+ /* Add the LX info */
+ if (!dt_has_node_property(hn, "ibm,vpd-lx-info", NULL)) {
+ dt_add_property_cells(hn, "ibm,vpd-lx-info",
+ lx_idx,
+ ((uint32_t *)lxr)[0],
+ ((uint32_t *)lxr)[1]);
+ }
+
+ return true;
+}
+
+
+static void io_get_loc_code(const void *sp_iohubs, struct dt_node *hn,
+			    const char *prop_name)
+{
+ const struct spira_fru_id *fru_id;
+ unsigned int fru_id_sz;
+ char loc_code[LOC_CODE_SIZE + 1];
+ const char *slca_loc_code;
+
+ /* Find SLCA Index */
+ fru_id = HDIF_get_idata(sp_iohubs, CECHUB_FRU_ID_DATA, &fru_id_sz);
+ if (fru_id) {
+ memset(loc_code, 0, sizeof(loc_code));
+
+ /* Find LOC Code from SLCA Index */
+ slca_loc_code = slca_get_loc_code_index(fru_id->slca_index);
+ if (slca_loc_code) {
+ strncpy(loc_code, slca_loc_code, LOC_CODE_SIZE);
+ if (!dt_has_node_property(hn, prop_name, NULL)) {
+ dt_add_property(hn, prop_name, loc_code,
+ strlen(loc_code) + 1);
+ }
+ printf("CEC: %s: %s (SLCA rsrc 0x%x)\n",
+ prop_name, loc_code, be16_to_cpu(fru_id->rsrc_id));
+ } else {
+ printf("CEC: SLCA Loc not found: index %d\n",
+ fru_id->slca_index);
+ }
+ } else {
+ printf("CEC: Hub FRU ID not found...\n");
+ }
+}
+
+static struct dt_node *io_add_p5ioc2(const struct cechub_io_hub *hub,
+ const void *sp_iohubs)
+{
+ struct dt_node *hn;
+ uint64_t reg[2];
+
+ const void *kwvpd;
+ unsigned int kwvpd_sz;
+
+ printf(" GX#%d BUID_Ext = 0x%x\n",
+ be32_to_cpu(hub->gx_index),
+ be32_to_cpu(hub->buid_ext));
+ printf(" GX BAR 0 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar0));
+ printf(" GX BAR 1 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar1));
+ printf(" GX BAR 2 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar2));
+ printf(" GX BAR 3 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar3));
+ printf(" GX BAR 4 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar4));
+
+ /* We assume SBAR == GX0 + some hard coded offset */
+ reg[0] = cleanup_addr(be64_to_cpu(hub->gx_ctrl_bar0) + P5IOC2_REGS_OFFSET);
+ reg[1] = 0x2000000;
+
+ hn = dt_new_addr(dt_root, "io-hub", reg[0]);
+ if (!hn)
+ return NULL;
+
+ dt_add_property(hn, "reg", reg, sizeof(reg));
+ dt_add_property_strings(hn, "compatible", "ibm,p5ioc2");
+
+ kwvpd = HDIF_get_idata(sp_iohubs, CECHUB_ASCII_KEYWORD_VPD, &kwvpd_sz);
+ if (kwvpd && kwvpd != sp_iohubs) {
+ /*
+		 * XXX We don't know how to properly find the LXRn
+ * record so for now we'll just try LXR0 and if not
+ * found, we try LXR1
+ */
+ if (!io_get_lx_info(kwvpd, kwvpd_sz, 0, hn))
+ io_get_lx_info(kwvpd, kwvpd_sz, 1, hn);
+ } else
+ printf("CEC: P5IOC2 Keywords not found.\n");
+
+ /* Get slots base loc code */
+ io_get_loc_code(sp_iohubs, hn, "ibm,io-base-loc-code");
+
+ return hn;
+}
+
+static struct dt_node *io_add_p7ioc(const struct cechub_io_hub *hub,
+ const void *sp_iohubs)
+{
+ struct dt_node *hn;
+ uint64_t reg[2];
+
+ const void *kwvpd;
+ unsigned int kwvpd_sz;
+
+ printf(" GX#%d BUID_Ext = 0x%x\n",
+ be32_to_cpu(hub->gx_index),
+ be32_to_cpu(hub->buid_ext));
+ printf(" GX BAR 0 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar0));
+ printf(" GX BAR 1 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar1));
+ printf(" GX BAR 2 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar2));
+ printf(" GX BAR 3 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar3));
+ printf(" GX BAR 4 = 0x%016llx\n", be64_to_cpu(hub->gx_ctrl_bar4));
+
+ /* We only know about memory map 1 */
+ if (hub->mem_map_vers != 1) {
+ prerror("P7IOC: Unknown memory map %d\n", hub->mem_map_vers);
+ /* We try to continue anyway ... */
+ }
+
+ reg[0] = cleanup_addr(be64_to_cpu(hub->gx_ctrl_bar1));
+ reg[1] = 0x2000000;
+
+ hn = dt_new_addr(dt_root, "io-hub", reg[0]);
+ if (!hn)
+ return NULL;
+
+ dt_add_property(hn, "reg", reg, sizeof(reg));
+ dt_add_property_strings(hn, "compatible", "ibm,p7ioc", "ibm,ioda-hub");
+
+ kwvpd = HDIF_get_idata(sp_iohubs, CECHUB_ASCII_KEYWORD_VPD, &kwvpd_sz);
+ if (kwvpd && kwvpd != sp_iohubs) {
+ /*
+		 * XXX We don't know how to properly find the LXRn
+ * record so for now we'll just try LXR0 and if not
+ * found, we try LXR1
+ */
+ if (!io_get_lx_info(kwvpd, kwvpd_sz, 0, hn))
+ io_get_lx_info(kwvpd, kwvpd_sz, 1, hn);
+ } else
+ printf("CEC: P7IOC Keywords not found.\n");
+
+ io_get_loc_code(sp_iohubs, hn, "ibm,io-base-loc-code");
+
+ return hn;
+}
+
+static struct dt_node *io_add_phb3(const struct cechub_io_hub *hub,
+ const struct HDIF_common_hdr *sp_iohubs,
+ unsigned int index, struct dt_node *xcom,
+ unsigned int pe_xscom,
+ unsigned int pci_xscom,
+ unsigned int spci_xscom)
+{
+ struct dt_node *pbcq;
+ uint32_t reg[6];
+ unsigned int hdif_vers;
+
+ /* Get HDIF version */
+ hdif_vers = be16_to_cpu(sp_iohubs->version);
+
+ /* Create PBCQ node under xscom */
+ pbcq = dt_new_addr(xcom, "pbcq", pe_xscom);
+ if (!pbcq)
+ return NULL;
+
+ /* "reg" property contains in order the PE, PCI and SPCI XSCOM
+ * addresses
+ */
+ reg[0] = pe_xscom;
+ reg[1] = 0x20;
+ reg[2] = pci_xscom;
+ reg[3] = 0x05;
+ reg[4] = spci_xscom;
+ reg[5] = 0x15;
+ dt_add_property(pbcq, "reg", reg, sizeof(reg));
+
+ /* A couple more things ... */
+ dt_add_property_strings(pbcq, "compatible", "ibm,power8-pbcq");
+ dt_add_property_cells(pbcq, "ibm,phb-index", index);
+ dt_add_property_cells(pbcq, "ibm,hub-id", be16_to_cpu(hub->hub_num));
+
+ /* The loc code of the PHB itself is different from the base
+ * loc code of the slots (It's actually the DCM's loc code).
+ */
+ io_get_loc_code(sp_iohubs, pbcq, "ibm,loc-code");
+
+ /* We indicate that this is an IBM setup, which means that
+ * the presence detect A/B bits are meaningful. So far we
+ * don't know whether they make any sense on customer setups
+ * so we only set that when booting with HDAT
+ */
+ dt_add_property(pbcq, "ibm,use-ab-detect", NULL, 0);
+
+ /* HDAT spec has these in version 0x6A and later */
+ if (hdif_vers >= 0x6a) {
+ u64 eq0 = be64_to_cpu(hub->phb_lane_eq[index][0]);
+ u64 eq1 = be64_to_cpu(hub->phb_lane_eq[index][1]);
+ u64 eq2 = be64_to_cpu(hub->phb_lane_eq[index][2]);
+ u64 eq3 = be64_to_cpu(hub->phb_lane_eq[index][3]);
+ dt_add_property_cells(pbcq, "ibm,lane-eq",
+ hi32(eq0), lo32(eq0),
+ hi32(eq1), lo32(eq1),
+ hi32(eq2), lo32(eq2),
+ hi32(eq3), lo32(eq3));
+ }
+
+ /* Currently we only create a PBCQ node, the actual PHB nodes
+ * will be added by sapphire later on.
+ */
+ return pbcq;
+}
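+
+/*
+ * For illustration, the first PBCQ created on murano DD1.0 (see the
+ * XSCOM addresses passed from io_add_murano() below) comes out
+ * roughly as:
+ *
+ *	pbcq@2012000 {
+ *		reg = <0x02012000 0x20  0x09012000 0x05  0x09013c00 0x15>;
+ *		compatible = "ibm,power8-pbcq";
+ *		ibm,phb-index = <0>;
+ *	};
+ */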
+
+static struct dt_node *io_add_murano(const struct cechub_io_hub *hub,
+ const struct HDIF_common_hdr *sp_iohubs)
+{
+ struct dt_node *xscom;
+ unsigned int i, chip_id;
+
+ chip_id = pcid_to_chip_id(be32_to_cpu(hub->proc_chip_id));
+
+ printf("CEC: HW CHIP=0x%x, HW TOPO=0x%04x\n", chip_id,
+ be16_to_cpu(hub->hw_topology));
+
+ xscom = find_xscom_for_chip(chip_id);
+ if (!xscom) {
+ prerror("MURANO: Can't find XSCOM for chip %d\n", chip_id);
+ return NULL;
+ }
+
+ /* Create PHBs, max 3 */
+ for (i = 0; i < 3; i++) {
+ if (hub->fab_br0_pdt & (0x80 >> i))
+ /* XSCOM addresses for murano DD1.0 */
+ io_add_phb3(hub, sp_iohubs, i, xscom,
+ 0x02012000 + (i * 0x400),
+ 0x09012000 + (i * 0x400),
+ 0x09013c00 + (i * 0x40));
+ }
+
+ /* HACK: We return the XSCOM device for the VPD info */
+ return xscom;
+}
+
+static void io_add_p8_cec_vpd(const struct HDIF_common_hdr *sp_iohubs)
+{
+ const struct HDIF_child_ptr *iokids;
+ const void *iokid;
+ const void *kwvpd;
+ unsigned int kwvpd_sz;
+
+ /* P8 LXR0 kept in IO KID Keyword VPD */
+ iokids = HDIF_child_arr(sp_iohubs, CECHUB_CHILD_IO_KIDS);
+ if (!CHECK_SPPTR(iokids)) {
+ printf("CEC: No IOKID child array !\n");
+ return;
+ }
+ if (!iokids->count) {
+ printf("CEC: IOKID count is 0 !\n");
+ return;
+ }
+ if (iokids->count > 1) {
+ printf("CEC: WARNING ! More than 1 IO KID !!! (%d)\n",
+ iokids->count);
+ /* Ignoring the additional ones */
+ }
+
+ iokid = HDIF_child(sp_iohubs, iokids, 0, "IO KID");
+ if (!iokid) {
+ printf("CEC: No IO KID structure in child array !\n");
+ return;
+ }
+
+ /* Grab base location code for slots */
+ io_get_loc_code(iokid, dt_root, "ibm,io-base-loc-code");
+
+ kwvpd = HDIF_get_idata(iokid, CECHUB_ASCII_KEYWORD_VPD, &kwvpd_sz);
+ if (!kwvpd) {
+ printf("CEC: No VPD entry in IO KID !\n");
+ return;
+ }
+
+ /* Grab LX load info */
+ io_get_lx_info(kwvpd, kwvpd_sz, 0, dt_root);
+}
+
+static struct dt_node *io_add_hea(const struct cechub_io_hub *hub,
+ const void *sp_io)
+{
+ struct dt_node *np, *gnp;
+ uint64_t reg[2];
+ unsigned int i, vpd_sz;
+ uint8_t kw_sz;
+ const void *iokid, *vpd, *ccin;
+ const uint8_t *mac;
+ const struct HDIF_child_ptr *iokids;
+
+ /*
+	 * We have a table of supported daughter cards looked up
+	 * by CCIN. We don't use the 1008 slot map in the VPD.
+	 *
+	 * This is basically translated from BML and will do for
+	 * now, especially since we don't really support p5ioc2
+	 * machines; this is just for lab use.
+ *
+ * This is mostly untested on 10G ... we might need more
+ * info about the PHY in that case
+ */
+ const struct hea_iocard {
+ const char ccin[4];
+ struct {
+ uint32_t speed;
+ uint16_t ports;
+ uint16_t phy_id;
+ } pg[2];
+ } hea_iocards[] = {
+ {
+ .ccin = "1818", /* HV4 something */
+ .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ },
+ {
+ .ccin = "1819", /* HV4 Titov Card */
+ .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ },
+ {
+ .ccin = "1830", /* HV4 Sergei Card */
+ .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 0 },
+ .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 0 },
+ },
+ {
+ .ccin = "181A", /* L4 Evans Card */
+ .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ },
+ {
+ .ccin = "181B", /* L4 Weber Card */
+ .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 0 },
+ .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 0 },
+ },
+ {
+ .ccin = "181C", /* HV4 Gibson Card */
+ .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ },
+ {
+ .ccin = "2BC4", /* MR Riverside 2 */
+ .pg[0] = { .speed = 1000, .ports = 1, .phy_id = 1 },
+ .pg[1] = { .speed = 1000, .ports = 1, .phy_id = 1 },
+ },
+ {
+ .ccin = "2BC5", /* MR Lions 2 */
+ .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 1 },
+ .pg[1] = { .speed = 10000, .ports = 1, .phy_id = 1 },
+ },
+ {
+ .ccin = "2BC6", /* MR Onion 2 */
+ .pg[0] = { .speed = 10000, .ports = 1, .phy_id = 1 },
+ .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 1 },
+ },
+ {
+ .ccin = "266D", /* Jupiter Bonzai */
+ .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 1 },
+ .pg[1] = { .speed = 1000, .ports = 2, .phy_id = 1 },
+ },
+	/* The blades use an IO KID that's a bit oddball and seems to
+ * represent the backplane itself, but let's use it anyway
+ *
+ * XXX Probably want a different PHY type !
+ */
+ {
+ .ccin = "531C", /* P7 Blade */
+ .pg[0] = { .speed = 1000, .ports = 2, .phy_id = 0 },
+ },
+ };
+ const struct hea_iocard *card = NULL;
+
+ /* WARNING: This makes quite a lot of nasty assumptions
+ * that appear to hold true on the few machines I care
+ * about, which is good enough for now. We don't officially
+ * support p5ioc2 anyway...
+ */
+
+ /* Get first IO KID, we only support one. Real support would
+ * mean using the FRU ID and the SLCA to find the right "stuff"
+ * but at this stage it's unnecessary
+ */
+ iokids = HDIF_child_arr(sp_io, CECHUB_CHILD_IO_KIDS);
+ if (!CHECK_SPPTR(iokids)) {
+ prerror("HEA: no IOKID in HDAT child array !\n");
+ return NULL;
+ }
+ if (!iokids->count) {
+ prerror("HEA: IOKID count is 0 !\n");
+ return NULL;
+ }
+ if (iokids->count > 1) {
+ printf("HEA: WARNING ! More than 1 IO KID !!! (%d)\n",
+ iokids->count);
+ }
+ iokid = HDIF_child(sp_io, iokids, 0, "IO KID");
+ if (!iokid) {
+ prerror("HEA: Failed to retrieve IO KID 0 !\n");
+ return NULL;
+ }
+
+ /* Grab VPD */
+ vpd = HDIF_get_idata(iokid, IOKID_KW_VPD, &vpd_sz);
+ if (!CHECK_SPPTR(vpd)) {
+ prerror("HEA: Failed to retrieve VPD from IO KID !\n");
+ return NULL;
+ }
+
+ /* Grab the MAC address */
+ mac = vpd_find(vpd, vpd_sz, "VINI", "B1", &kw_sz);
+ if (!mac || kw_sz < 8) {
+ prerror("HEA: Failed to retrieve MAC Address !\n");
+ return NULL;
+ }
+
+ /* Grab the CCIN (card ID) */
+ ccin = vpd_find(vpd, vpd_sz, "VINI", "CC", &kw_sz);
+ if (!ccin || kw_sz < 4) {
+ prerror("HEA: Failed to retrieve CCIN !\n");
+ return NULL;
+ }
+
+ /* Now we could try to parse the 1008 slot map etc... but instead
+ * we'll do like BML and grab the CCIN & use it for known cards.
+ * We also grab the MAC
+ */
+ for (i = 0; i < ARRAY_SIZE(hea_iocards) && !card; i++) {
+ if (strncmp(hea_iocards[i].ccin, ccin, 4))
+ continue;
+ card = &hea_iocards[i];
+ }
+ if (!card) {
+ prerror("HEA: Unknown CCIN 0x%.4s!\n", (const char *)ccin);
+ return NULL;
+ }
+
+ /* Assume base address is BAR3 + 0x4000000000 */
+	reg[0] = be64_to_cpu(hub->gx_ctrl_bar3) + 0x4000000000;
+ reg[1] = 0xc0000000;
+
+ printf("CEC: * Adding HEA to P5IOC2, assuming GBA=0x%llx\n",
+ (long long)reg[0]);
+ np = dt_new_addr(dt_root, "ibm,hea", reg[0]);
+ if (!np)
+ return NULL;
+
+ dt_add_property(np, "reg", reg, sizeof(reg));
+ dt_add_property_strings(np, "compatible", "ibm,p5ioc2-hea");
+ dt_add_property_cells(np, "#address-cells", 1);
+ dt_add_property_cells(np, "#size-cells", 0);
+ dt_add_property(np, "ibm,vpd", vpd, vpd_sz);
+ dt_add_property_cells(np, "#mac-address", mac[7]);
+ dt_add_property(np, "mac-address-base", mac, 6);
+ /* BUID is base + 0x30 */
+ dt_add_property(np, "interrupt-controller", NULL, 0);
+ dt_add_property_cells(np, "interrupt-base",
+ ((hub->buid_ext << 9) | 0x30) << 4);
+ dt_add_property_cells(np, "interrupt-max-count", 128);
+
+ /* Always 2 port groups */
+ for (i = 0; i < 2; i++) {
+ unsigned int clause;
+
+ switch(card->pg[i].speed) {
+ case 1000:
+ clause = 0x22;
+ break;
+ case 10000:
+ clause = 0x45;
+ break;
+ default:
+ /* Unused port group */
+ continue;
+ }
+ gnp = dt_new_addr(np, "portgroup", i + 1);
+ if (!gnp)
+ continue;
+
+ dt_add_property_cells(gnp, "reg", i + 1);
+ dt_add_property_cells(gnp, "speed", card->pg[i].speed);
+ /* XX FIXME */
+ dt_add_property_strings(gnp, "phy-type", "mdio");
+ dt_add_property_cells(gnp, "phy-mdio-addr", card->pg[i].phy_id);
+ dt_add_property_cells(gnp, "phy-mdio-clause", clause);
+ dt_add_property_cells(gnp, "subports", card->pg[i].ports);
+ }
+ return np;
+}
+
+static void io_parse_fru(const void *sp_iohubs)
+{
+ unsigned int i;
+ struct dt_node *hn;
+ int count;
+
+ count = HDIF_get_iarray_size(sp_iohubs, CECHUB_FRU_IO_HUBS);
+ if (count < 1) {
+ prerror("CEC: IO FRU with no chips !\n");
+ return;
+ }
+
+ printf("CEC: %d chips in FRU\n", count);
+
+ /* Iterate IO hub array */
+ for (i = 0; i < count; i++) {
+ const struct cechub_io_hub *hub;
+ unsigned int size, hub_id;
+
+ hub = HDIF_get_iarray_item(sp_iohubs, CECHUB_FRU_IO_HUBS,
+ i, &size);
+ if (!hub || size < CECHUB_IOHUB_MIN_SIZE) {
+ prerror("CEC: IO-HUB Chip %d bad idata\n", i);
+ continue;
+ }
+ printf("CEC: IO Hub Chip #%d:\n", i);
+ switch (hub->flags & CECHUB_HUB_FLAG_STATE_MASK) {
+ case CECHUB_HUB_FLAG_STATE_OK:
+ printf("CEC: OK\n");
+ break;
+ case CECHUB_HUB_FLAG_STATE_FAILURES:
+ printf("CEC: OK with failures\n");
+ break;
+ case CECHUB_HUB_FLAG_STATE_NOT_INST:
+ printf("CEC: Not installed\n");
+ continue;
+ case CECHUB_HUB_FLAG_STATE_UNUSABLE:
+			printf("CEC: Unusable\n");
+ continue;
+ }
+
+ hub_id = be16_to_cpu(hub->iohub_id);
+
+ /* GX BAR assignment */
+		printf("CEC: PChip: %d HUB ID: %04x [EC=0x%x] Hub#=%d\n",
+ be32_to_cpu(hub->proc_chip_id), hub_id,
+ be32_to_cpu(hub->ec_level), be32_to_cpu(hub->hub_num));
+
+ switch(hub_id) {
+ case CECHUB_HUB_P7IOC:
+ printf("CEC: P7IOC !\n");
+			hn = io_add_p7ioc(hub, sp_iohubs);
+			if (hn)
+				io_add_common(hn, hub);
+			break;
+ case CECHUB_HUB_P5IOC2:
+ printf("CEC: P5IOC2 !\n");
+			hn = io_add_p5ioc2(hub, sp_iohubs);
+			if (hn)
+				io_add_common(hn, hub);
+			io_add_hea(hub, sp_iohubs);
+ break;
+ case CECHUB_HUB_MURANO:
+ case CECHUB_HUB_MURANO_SEGU:
+ printf("CEC: Murano !\n");
+ hn = io_add_murano(hub, sp_iohubs);
+ break;
+ default:
+ printf("CEC: Hub ID 0x%04x unsupported !\n",
+ hub_id);
+ hn = NULL;
+ }
+ }
+
+ /* On P8, grab the CEC VPD */
+ if (proc_gen == proc_gen_p8)
+ io_add_p8_cec_vpd(sp_iohubs);
+}
+
+void io_parse(void)
+{
+ const struct HDIF_common_hdr *sp_iohubs;
+ unsigned int i, size;
+
+ /* Look for IO Hubs */
+ if (!get_hdif(&spira.ntuples.cec_iohub_fru, "IO HUB")) {
+ prerror("CEC: Cannot locate IO Hub FRU data !\n");
+ return;
+ }
+
+ /*
+ * Note about LXRn numbering ...
+ *
+ * I can't completely make sense of what that is supposed to be, so
+ * for now, what we do is look for the first one we can find and
+ * increment it for each chip. Works for the machines I have here
+ */
+
+ for_each_ntuple_idx(&spira.ntuples.cec_iohub_fru, sp_iohubs, i,
+ CECHUB_FRU_HDIF_SIG) {
+ const struct cechub_hub_fru_id *fru_id_data;
+ unsigned int type;
+ static const char *typestr[] = {
+ "Reservation",
+ "Card",
+ "CPU Card",
+ "Backplane",
+ "Backplane Extension"
+ };
+ fru_id_data = HDIF_get_idata(sp_iohubs, CECHUB_FRU_ID_DATA_AREA,
+ &size);
+ if (!fru_id_data || size < sizeof(struct cechub_hub_fru_id)) {
+ prerror("CEC: IO-HUB FRU %d, bad ID data\n", i);
+ continue;
+ }
+ type = fru_id_data->card_type;
+
+ printf("CEC: HUB FRU %d is %s\n",
+ i, type > 4 ? "Unknown" : typestr[type]);
+
+ /*
+ * We currently only handle the backplane (Juno) and
+ * processor FRU (P8 machines)
+ */
+ if (type != CECHUB_FRU_TYPE_CEC_BKPLANE &&
+ type != CECHUB_FRU_TYPE_CPU_CARD) {
+ prerror("CEC: Unsupported type\n");
+ continue;
+ }
+
+ /* We don't support Hubs connected to pass-through ports */
+ if (fru_id_data->flags & (CECHUB_FRU_FLAG_HEADLESS |
+ CECHUB_FRU_FLAG_PASSTHROUGH)) {
+ prerror("CEC: Headless or Passthrough unsupported\n");
+ continue;
+ }
+
+ /* Ok, we have a reasonable candidate */
+ io_parse_fru(sp_iohubs);
+ }
+}
+
diff --git a/hdata/memory.c b/hdata/memory.c
new file mode 100644
index 0000000..8cca689
--- /dev/null
+++ b/hdata/memory.c
@@ -0,0 +1,377 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory.h>
+#include <cpu.h>
+#include <device_tree.h>
+#include <device.h>
+#include <vpd.h>
+#include <ccan/str/str.h>
+#include <libfdt/libfdt.h>
+#include <types.h>
+
+#include "spira.h"
+#include "hdata.h"
+
+struct HDIF_ram_area_id {
+ __be16 id;
+#define RAM_AREA_INSTALLED 0x8000
+#define RAM_AREA_FUNCTIONAL 0x4000
+ __be16 flags;
+};
+
+struct HDIF_ram_area_size {
+ __be64 mb;
+};
+
+struct ram_area {
+ const struct HDIF_ram_area_id *raid;
+ const struct HDIF_ram_area_size *rasize;
+};
+
+struct HDIF_ms_area_address_range {
+ __be64 start;
+ __be64 end;
+ __be32 chip;
+ __be32 mirror_attr;
+ __be64 mirror_start;
+};
+
+struct HDIF_ms_area_id {
+ __be16 id;
+#define MS_PTYPE_RISER_CARD 0x8000
+#define MS_PTYPE_MEM_CARD 0x4000
+#define MS_PTYPE_CEC_FRU 0x2000
+#define MS_PTYPE_HYBRID_CARD 0x1000
+ __be16 parent_type;
+#define MS_AREA_INSTALLED 0x8000
+#define MS_AREA_FUNCTIONAL 0x4000
+#define MS_AREA_SHARED 0x2000
+ __be16 flags;
+ __be16 share_id;
+};
+
+static struct dt_node *find_shared(struct dt_node *root, u16 id, u64 start, u64 len)
+{
+ struct dt_node *i;
+
+ for (i = dt_first(root); i; i = dt_next(root, i)) {
+ __be64 reg[2];
+ const struct dt_property *shared, *type;
+
+ type = dt_find_property(i, "device_type");
+ if (!type || strcmp(type->prop, "memory") != 0)
+ continue;
+
+ shared = dt_find_property(i, DT_PRIVATE "share-id");
+ if (!shared || fdt32_to_cpu(*(u32 *)shared->prop) != id)
+ continue;
+
+ memcpy(reg, dt_find_property(i, "reg")->prop, sizeof(reg));
+ if (be64_to_cpu(reg[0]) == start && be64_to_cpu(reg[1]) == len)
+ break;
+ }
+ return i;
+}
+
+static void append_chip_id(struct dt_node *mem, u32 id)
+{
+ struct dt_property *prop;
+ size_t len, i;
+ u32 *p;
+
+ prop = __dt_find_property(mem, "ibm,chip-id");
+ if (!prop)
+ return;
+ len = prop->len >> 2;
+ p = (u32 *)prop->prop;
+
+ /* Check if it exists already */
+ for (i = 0; i < len; i++) {
+ if (be32_to_cpu(p[i]) == id)
+ return;
+ }
+
+ /* Add it to the list */
+ dt_resize_property(&prop, (len + 1) << 2);
+ p = (u32 *)prop->prop;
+ p[len] = cpu_to_be32(id);
+}
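+
+/*
+ * Example: a shared MS area first seen from chip 0 and revisited from
+ * chip 1 ends up with ibm,chip-id = <0 1>; the loop above filters out
+ * duplicates, so revisiting it from chip 0 again is a no-op.
+ */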
+
+static bool add_address_range(struct dt_node *root,
+ const struct HDIF_ms_area_id *id,
+ const struct HDIF_ms_area_address_range *arange)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+ u32 chip_id;
+
+	printf("  Range: 0x%016llx..0x%016llx on Chip 0x%x mattr: 0x%x\n",
+	       (long long)be64_to_cpu(arange->start),
+	       (long long)be64_to_cpu(arange->end),
+	       pcid_to_chip_id(be32_to_cpu(arange->chip)),
+	       be32_to_cpu(arange->mirror_attr));
+
+ /* reg contains start and length */
+ reg[0] = cleanup_addr(be64_to_cpu(arange->start));
+ reg[1] = cleanup_addr(be64_to_cpu(arange->end)) - reg[0];
+
+ chip_id = pcid_to_chip_id(be32_to_cpu(arange->chip));
+
+ if (be16_to_cpu(id->flags) & MS_AREA_SHARED) {
+ /* Only enter shared nodes once. */
+ mem = find_shared(root, be16_to_cpu(id->share_id),
+ reg[0], reg[1]);
+ if (mem) {
+ append_chip_id(mem, chip_id);
+ return true;
+ }
+ }
+ sprintf(name, "memory@%llx", (long long)reg[0]);
+
+ mem = dt_new(root, name);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property_cells(mem, "ibm,chip-id", chip_id);
+ dt_add_property_u64s(mem, "reg", reg[0], reg[1]);
+ if (be16_to_cpu(id->flags) & MS_AREA_SHARED)
+ dt_add_property_cells(mem, DT_PRIVATE "share-id",
+ be16_to_cpu(id->share_id));
+
+ return true;
+}
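+
+/*
+ * Sketch of a resulting node (example values; "reg" carries the 64-bit
+ * start and length as two cells each):
+ *
+ *	memory@0 {
+ *		device_type = "memory";
+ *		ibm,chip-id = <0x0>;
+ *		reg = <0x0 0x0 0x8 0x0>;
+ *	};
+ */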
+
+static void add_chip_id_to_ram_area(const struct HDIF_common_hdr *msarea,
+ struct dt_node *ram_area)
+{
+ const struct HDIF_array_hdr *arr;
+ const struct HDIF_ms_area_address_range *arange;
+ unsigned int size;
+ u32 chip_id;
+
+ /* Safe to assume pointers are valid here. */
+ arr = HDIF_get_idata(msarea, 4, &size);
+ arange = (void *)arr + be32_to_cpu(arr->offset);
+ chip_id = pcid_to_chip_id(be32_to_cpu(arange->chip));
+ dt_add_property_cells(ram_area, "ibm,chip-id", chip_id);
+}
+
+static void add_size_to_ram_area(struct dt_node *ram_node,
+ const struct HDIF_common_hdr *hdr,
+ int indx_vpd)
+{
+ const void *fruvpd;
+ unsigned int fruvpd_sz;
+ const void *kw;
+ char *str;
+ uint8_t kwsz;
+
+ fruvpd = HDIF_get_idata(hdr, indx_vpd, &fruvpd_sz);
+ if (!CHECK_SPPTR(fruvpd))
+ return;
+
+ /* DIMM Size */
+ kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "SZ", &kwsz);
+ if (!kw)
+ return;
+
+ str = zalloc(kwsz + 1);
+ memcpy(str, kw, kwsz);
+ dt_add_property_string(ram_node, "size", str);
+ free(str);
+}
+
+static void vpd_add_ram_area(const struct HDIF_common_hdr *msarea)
+{
+ unsigned int i;
+ unsigned int ram_sz;
+ const struct HDIF_common_hdr *ramarea;
+ const struct HDIF_child_ptr *ramptr;
+ const struct HDIF_ram_area_id *ram_id;
+ struct dt_node *ram_node;
+
+ ramptr = HDIF_child_arr(msarea, 0);
+ if (!CHECK_SPPTR(ramptr)) {
+ prerror("MS AREA: No RAM area at %p\n", msarea);
+ return;
+ }
+
+ for (i = 0; i < be32_to_cpu(ramptr->count); i++) {
+ ramarea = HDIF_child(msarea, ramptr, i, "RAM ");
+ if (!CHECK_SPPTR(ramarea))
+ continue;
+
+ ram_id = HDIF_get_idata(ramarea, 2, &ram_sz);
+ if (!CHECK_SPPTR(ram_id))
+ continue;
+
+ if ((be16_to_cpu(ram_id->flags) & RAM_AREA_INSTALLED) &&
+ (be16_to_cpu(ram_id->flags) & RAM_AREA_FUNCTIONAL)) {
+ ram_node = dt_add_vpd_node(ramarea, 0, 1);
+ if (ram_node) {
+ add_chip_id_to_ram_area(msarea, ram_node);
+ add_size_to_ram_area(ram_node, ramarea, 1);
+ }
+ }
+ }
+}
+
+static void get_msareas(struct dt_node *root,
+ const struct HDIF_common_hdr *ms_vpd)
+{
+ unsigned int i;
+ const struct HDIF_child_ptr *msptr;
+
+ /* First childptr refers to msareas. */
+ msptr = HDIF_child_arr(ms_vpd, MSVPD_CHILD_MS_AREAS);
+ if (!CHECK_SPPTR(msptr)) {
+ prerror("MS VPD: no children at %p\n", ms_vpd);
+ return;
+ }
+
+ for (i = 0; i < be32_to_cpu(msptr->count); i++) {
+ const struct HDIF_common_hdr *msarea;
+ const struct HDIF_array_hdr *arr;
+ const struct HDIF_ms_area_address_range *arange;
+ const struct HDIF_ms_area_id *id;
+ const void *fruid;
+ unsigned int size, j;
+ u16 flags;
+
+ msarea = HDIF_child(ms_vpd, msptr, i, "MSAREA");
+ if (!CHECK_SPPTR(msarea))
+ return;
+
+ id = HDIF_get_idata(msarea, 2, &size);
+ if (!CHECK_SPPTR(id))
+ return;
+ if (size < sizeof(*id)) {
+ prerror("MS VPD: %p msarea #%i id size too small!\n",
+ ms_vpd, i);
+ return;
+ }
+
+ flags = be16_to_cpu(id->flags);
+ printf("MS VPD: %p, area %i: %s %s %s\n",
+ ms_vpd, i,
+ flags & MS_AREA_INSTALLED ?
+ "installed" : "not installed",
+ flags & MS_AREA_FUNCTIONAL ?
+ "functional" : "not functional",
+ flags & MS_AREA_SHARED ?
+ "shared" : "not shared");
+
+ if ((flags & (MS_AREA_INSTALLED|MS_AREA_FUNCTIONAL))
+ != (MS_AREA_INSTALLED|MS_AREA_FUNCTIONAL))
+ continue;
+
+ arr = HDIF_get_idata(msarea, 4, &size);
+ if (!CHECK_SPPTR(arr))
+ continue;
+
+ if (size < sizeof(*arr)) {
+ prerror("MS VPD: %p msarea #%i arr size too small!\n",
+ ms_vpd, i);
+ return;
+ }
+
+ if (be32_to_cpu(arr->eactsz) < sizeof(*arange)) {
+ prerror("MS VPD: %p msarea #%i arange size too small!\n",
+ ms_vpd, i);
+ return;
+ }
+
+ fruid = HDIF_get_idata(msarea, 0, &size);
+ if (!CHECK_SPPTR(fruid))
+ return;
+
+		/* Add Riser card VPD */
+ if (be16_to_cpu(id->parent_type) & MS_PTYPE_RISER_CARD)
+ dt_add_vpd_node(msarea, 0, 1);
+
+ /* Add RAM Area VPD */
+ vpd_add_ram_area(msarea);
+
+ /* This offset is from the arr, not the header! */
+ arange = (void *)arr + be32_to_cpu(arr->offset);
+ for (j = 0; j < be32_to_cpu(arr->ecnt); j++) {
+ if (!add_address_range(root, id, arange))
+ return;
+ arange = (void *)arange + be32_to_cpu(arr->esize);
+ }
+ }
+}
+
+static bool __memory_parse(struct dt_node *root)
+{
+ struct HDIF_common_hdr *ms_vpd;
+ const struct msvpd_ms_addr_config *msac;
+ const struct msvpd_total_config_ms *tcms;
+ unsigned int size;
+
+ ms_vpd = get_hdif(&spira.ntuples.ms_vpd, MSVPD_HDIF_SIG);
+ if (!ms_vpd) {
+ prerror("MS VPD: invalid\n");
+ op_display(OP_FATAL, OP_MOD_MEM, 0x0000);
+ return false;
+ }
+ if (be32_to_cpu(spira.ntuples.ms_vpd.act_len) < sizeof(*ms_vpd)) {
+ prerror("MS VPD: invalid size %u\n",
+ be32_to_cpu(spira.ntuples.ms_vpd.act_len));
+ op_display(OP_FATAL, OP_MOD_MEM, 0x0001);
+ return false;
+ }
+
+ printf("MS VPD: is at %p\n", ms_vpd);
+
+ msac = HDIF_get_idata(ms_vpd, MSVPD_IDATA_MS_ADDR_CONFIG, &size);
+ if (!CHECK_SPPTR(msac) || size < sizeof(*msac)) {
+ prerror("MS VPD: bad msac size %u @ %p\n", size, msac);
+ op_display(OP_FATAL, OP_MOD_MEM, 0x0002);
+ return false;
+ }
+ printf("MS VPD: MSAC is at %p\n", msac);
+
+ dt_add_property_u64(dt_root, DT_PRIVATE "maxmem",
+ be64_to_cpu(msac->max_configured_ms_address));
+
+ tcms = HDIF_get_idata(ms_vpd, MSVPD_IDATA_TOTAL_CONFIG_MS, &size);
+ if (!CHECK_SPPTR(tcms) || size < sizeof(*tcms)) {
+ prerror("MS VPD: Bad tcms size %u @ %p\n", size, tcms);
+ op_display(OP_FATAL, OP_MOD_MEM, 0x0003);
+ return false;
+ }
+ printf("MS VPD: TCMS is at %p\n", tcms);
+
+ printf("MS VPD: Maximum configured address: 0x%llx\n",
+ (long long)be64_to_cpu(msac->max_configured_ms_address));
+ printf("MS VPD: Maximum possible address: 0x%llx\n",
+ (long long)be64_to_cpu(msac->max_possible_ms_address));
+
+ get_msareas(root, ms_vpd);
+
+ printf("MS VPD: Total MB of RAM: 0x%llx\n",
+ (long long)be64_to_cpu(tcms->total_in_mb));
+
+ return true;
+}
+
+void memory_parse(void)
+{
+ if (!__memory_parse(dt_root)) {
+ prerror("MS VPD: Failed memory init !\n");
+ abort();
+ }
+}
+
diff --git a/hdata/paca.c b/hdata/paca.c
new file mode 100644
index 0000000..d4360e7
--- /dev/null
+++ b/hdata/paca.c
@@ -0,0 +1,322 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include "spira.h"
+#include <cpu.h>
+#include <fsp.h>
+#include <opal.h>
+#include <ccan/str/str.h>
+#include <device.h>
+#include <types.h>
+
+#include "hdata.h"
+
+#define PACA_MAX_THREADS 4
+
+static unsigned int paca_index(const struct HDIF_common_hdr *paca)
+{
+ void *start = get_hdif(&spira.ntuples.paca, PACA_HDIF_SIG);
+ return ((void *)paca - start)
+ / be32_to_cpu(spira.ntuples.paca.alloc_len);
+}
+
+static struct dt_node *add_cpu_node(struct dt_node *cpus,
+ const struct HDIF_common_hdr *paca,
+ const struct sppaca_cpu_id *id,
+ bool okay)
+{
+ const struct sppaca_cpu_timebase *timebase;
+ const struct sppaca_cpu_cache *cache;
+ const struct sppaca_cpu_attr *attr;
+ struct dt_node *cpu;
+ u32 no, size, ve_flags, l2_phandle, chip_id;
+
+ /* We use the process_interrupt_line as the res id */
+ no = be32_to_cpu(id->process_interrupt_line);
+
+ ve_flags = be32_to_cpu(id->verify_exists_flags);
+ printf("CPU[%i]: PIR=%i RES=%i %s %s(%u threads)\n",
+ paca_index(paca), be32_to_cpu(id->pir), no,
+ ve_flags & CPU_ID_PACA_RESERVED
+ ? "**RESERVED**" : cpu_state(ve_flags),
+ ve_flags & CPU_ID_SECONDARY_THREAD
+ ? "[secondary] " :
+ (be32_to_cpu(id->pir) == boot_cpu->pir ? "[boot] " : ""),
+ ((ve_flags & CPU_ID_NUM_SECONDARY_THREAD_MASK)
+ >> CPU_ID_NUM_SECONDARY_THREAD_SHIFT) + 1);
+
+ timebase = HDIF_get_idata(paca, SPPACA_IDATA_TIMEBASE, &size);
+ if (!timebase || size < sizeof(*timebase)) {
+ prerror("CPU[%i]: bad timebase size %u @ %p\n",
+ paca_index(paca), size, timebase);
+ return NULL;
+ }
+
+ cache = HDIF_get_idata(paca, SPPACA_IDATA_CACHE_SIZE, &size);
+ if (!cache || size < sizeof(*cache)) {
+ prerror("CPU[%i]: bad cache size %u @ %p\n",
+ paca_index(paca), size, cache);
+ return NULL;
+ }
+
+ cpu = add_core_common(cpus, cache, timebase, no, okay);
+
+ /* Core attributes */
+ attr = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ATTR, &size);
+ if (attr)
+ add_core_attr(cpu, be32_to_cpu(attr->attr));
+
+ /* Add cache info */
+ l2_phandle = add_core_cache_info(cpus, cache, no, okay);
+ dt_add_property_cells(cpu, "l2-cache", l2_phandle);
+
+ /* We append the secondary cpus in __cpu_parse */
+ dt_add_property_cells(cpu, "ibm,ppc-interrupt-server#s", no);
+
+ dt_add_property_cells(cpu, DT_PRIVATE "hw_proc_id",
+ be32_to_cpu(id->hardware_proc_id));
+ dt_add_property_cells(cpu, "ibm,pir", be32_to_cpu(id->pir));
+
+ chip_id = pcid_to_chip_id(be32_to_cpu(id->processor_chip_id));
+ dt_add_property_cells(cpu, "ibm,chip-id", chip_id);
+
+ return cpu;
+}
+
+static struct dt_node *find_cpu_by_hardware_proc_id(struct dt_node *root,
+ u32 hw_proc_id)
+{
+ struct dt_node *i;
+
+ dt_for_each_node(root, i) {
+ const struct dt_property *prop;
+
+ if (!dt_has_node_property(i, "device_type", "cpu"))
+ continue;
+
+ prop = dt_find_property(i, DT_PRIVATE "hw_proc_id");
+ if (be32_to_cpu(*(u32 *)prop->prop) == hw_proc_id)
+ return i;
+ }
+ return NULL;
+}
+
+/* Note that numbers are small. */
+static void add_be32_sorted(__be32 arr[], __be32 new, unsigned num)
+{
+ unsigned int i;
+
+ /* Walk until we find where we belong (insertion sort). */
+ for (i = 0; i < num; i++) {
+ if (be32_to_cpu(new) < be32_to_cpu(arr[i])) {
+ __be32 tmp = arr[i];
+ arr[i] = new;
+ new = tmp;
+ }
+ }
+ arr[i] = new;
+}
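+
+/*
+ * Example: inserting 5 into [2 7 9] (num = 3) shifts the tail right
+ * and yields [2 5 7 9]. The destination must have room for num + 1
+ * entries, which __paca_parse() ensures by allocating before calling.
+ */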
+
+static void add_icps(void)
+{
+ struct dt_node *cpu;
+ unsigned int i;
+ u64 reg[PACA_MAX_THREADS * 2];
+ struct dt_node *icp;
+
+ dt_for_each_node(dt_root, cpu) {
+ u32 irange[2], size, pir;
+ const struct dt_property *intsrv;
+ const struct HDIF_common_hdr *paca;
+ u64 ibase;
+ unsigned int num_threads;
+ bool found = false;
+
+ if (!dt_has_node_property(cpu, "device_type", "cpu"))
+ continue;
+
+ intsrv = dt_find_property(cpu, "ibm,ppc-interrupt-server#s");
+ pir = dt_prop_get_u32(cpu, "ibm,pir");
+
+ /* Get ibase address */
+ paca = get_hdif(&spira.ntuples.paca, PACA_HDIF_SIG);
+ for_each_paca(paca) {
+ const struct sppaca_cpu_id *id;
+ id = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ID, &size);
+
+ if (pir != be32_to_cpu(id->pir))
+ continue;
+ ibase = cleanup_addr(be64_to_cpu(id->ibase));
+ found = true;
+ break;
+ }
+ if (!found)
+ return;
+
+ num_threads = intsrv->len / sizeof(u32);
+ assert(num_threads <= PACA_MAX_THREADS);
+
+ icp = dt_new_addr(dt_root, "interrupt-controller", ibase);
+ if (!icp)
+ continue;
+
+ dt_add_property_strings(icp, "compatible",
+ "IBM,ppc-xicp",
+ "IBM,power7-xicp");
+
+ irange[0] = dt_property_get_cell(intsrv, 0); /* Index */
+ irange[1] = num_threads; /* num servers */
+ dt_add_property(icp, "ibm,interrupt-server-ranges",
+ irange, sizeof(irange));
+ dt_add_property(icp, "interrupt-controller", NULL, 0);
+ dt_add_property_cells(icp, "#address-cells", 0);
+ dt_add_property_cells(icp, "#interrupt-cells", 1);
+ dt_add_property_string(icp, "device_type",
+ "PowerPC-External-Interrupt-Presentation");
+ for (i = 0; i < num_threads*2; i += 2) {
+ reg[i] = ibase;
+ /* One page is enough for a handful of regs. */
+ reg[i+1] = 4096;
+ ibase += reg[i+1];
+ }
+ dt_add_property(icp, "reg", reg, sizeof(reg));
+ }
+}
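+
+/*
+ * Sketch of the node built above for a 4-thread core, with an example
+ * ibase of 0x3ffff80020000 (one 4K register page per thread):
+ *
+ *	interrupt-controller@3ffff80020000 {
+ *		compatible = "IBM,ppc-xicp", "IBM,power7-xicp";
+ *		ibm,interrupt-server-ranges = <first-server 4>;
+ *		reg = <0x3ffff80020000 0x1000  0x3ffff80021000 0x1000 ...>;
+ *	};
+ */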
+
+static bool __paca_parse(void)
+{
+ const struct HDIF_common_hdr *paca;
+ struct dt_node *cpus;
+
+ paca = get_hdif(&spira.ntuples.paca, PACA_HDIF_SIG);
+ if (!paca) {
+ prerror("Invalid PACA (PCIA = %p)\n",
+ ntuple_addr(&spira.ntuples.pcia));
+ return false;
+ }
+
+ if (be32_to_cpu(spira.ntuples.paca.act_len) < sizeof(*paca)) {
+ prerror("PACA: invalid size %u\n",
+ be32_to_cpu(spira.ntuples.paca.act_len));
+ return false;
+ }
+
+ cpus = dt_new(dt_root, "cpus");
+ dt_add_property_cells(cpus, "#address-cells", 1);
+ dt_add_property_cells(cpus, "#size-cells", 0);
+
+ for_each_paca(paca) {
+ const struct sppaca_cpu_id *id;
+ u32 size, ve_flags;
+ bool okay;
+
+ id = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ID, &size);
+
+ /* The ID structure on Blade314 is only 0x54 long. We can
+ * cope with it as we don't use all the additional fields.
+ * The minimum size we support is 0x40
+ */
+ if (!id || size < SPIRA_CPU_ID_MIN_SIZE) {
+ prerror("CPU[%i]: bad id size %u @ %p\n",
+ paca_index(paca), size, id);
+ return false;
+ }
+
+ ve_flags = be32_to_cpu(id->verify_exists_flags);
+ switch ((ve_flags&CPU_ID_VERIFY_MASK) >> CPU_ID_VERIFY_SHIFT) {
+ case CPU_ID_VERIFY_USABLE_NO_FAILURES:
+ case CPU_ID_VERIFY_USABLE_FAILURES:
+ okay = true;
+ break;
+ default:
+ okay = false;
+ }
+
+ printf("CPU[%i]: PIR=%i RES=%i %s\n",
+ paca_index(paca), be32_to_cpu(id->pir),
+ be32_to_cpu(id->process_interrupt_line),
+ okay ? "OK" : "UNAVAILABLE");
+
+ /* Secondary threads don't get their own node. */
+ if (ve_flags & CPU_ID_SECONDARY_THREAD)
+ continue;
+
+ if (!add_cpu_node(cpus, paca, id, okay))
+ return false;
+ }
+
+ /* Now account for secondaries. */
+ for_each_paca(paca) {
+ const struct dt_property *prop;
+ const struct sppaca_cpu_id *id;
+ u32 size, state, num, ve_flags;
+ struct dt_node *cpu;
+ __be32 *new_prop;
+
+		id = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ID, &size);
+ ve_flags = be32_to_cpu(id->verify_exists_flags);
+ state = (ve_flags & CPU_ID_VERIFY_MASK) >> CPU_ID_VERIFY_SHIFT;
+ switch (state) {
+ case CPU_ID_VERIFY_USABLE_NO_FAILURES:
+ case CPU_ID_VERIFY_USABLE_FAILURES:
+ break;
+ default:
+ continue;
+ }
+
+ /* Only interested in secondary threads. */
+ if (!(ve_flags & CPU_ID_SECONDARY_THREAD))
+ continue;
+
+ cpu = find_cpu_by_hardware_proc_id(cpus,
+ be32_to_cpu(id->hardware_proc_id));
+ if (!cpu) {
+ prerror("CPU[%i]: could not find primary hwid %i\n",
+ paca_index(paca),
+ be32_to_cpu(id->hardware_proc_id));
+ return false;
+ }
+
+ /* Add the cpu #. */
+ prop = dt_find_property(cpu, "ibm,ppc-interrupt-server#s");
+ num = prop->len / sizeof(u32);
+ new_prop = malloc((num + 1) * sizeof(u32));
+ if (!new_prop) {
+			prerror("Failed to allocate %zu bytes for property\n",
+ (num + 1) * sizeof(u32));
+ return false;
+ }
+ memcpy(new_prop, prop->prop, prop->len);
+ add_be32_sorted(new_prop, id->process_interrupt_line, num);
+ dt_del_property(cpu, (struct dt_property *)prop);
+ dt_add_property(cpu, "ibm,ppc-interrupt-server#s",
+ new_prop, (num + 1) * sizeof(__be32));
+ free(new_prop);
+ }
+
+ add_icps();
+
+ return true;
+}
+
+void paca_parse(void)
+{
+ if (!__paca_parse()) {
+ prerror("CPU: Initial CPU parsing failed\n");
+ abort();
+ }
+}
diff --git a/hdata/pcia.c b/hdata/pcia.c
new file mode 100644
index 0000000..d128a31
--- /dev/null
+++ b/hdata/pcia.c
@@ -0,0 +1,242 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include "spira.h"
+#include <cpu.h>
+#include <fsp.h>
+#include <opal.h>
+#include <ccan/str/str.h>
+#include <device.h>
+
+#include "hdata.h"
+
+#define PCIA_MAX_THREADS 8
+
+static unsigned int pcia_index(const void *pcia)
+{
+ return (pcia - (void *)get_hdif(&spira.ntuples.pcia, "SPPCIA"))
+		/ be32_to_cpu(spira.ntuples.pcia.alloc_len);
+}
+
+static const struct sppcia_cpu_thread *find_tada(const void *pcia,
+ unsigned int thread)
+{
+ unsigned int count = HDIF_get_iarray_size(pcia,
+ SPPCIA_IDATA_THREAD_ARRAY);
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ const struct sppcia_cpu_thread *t;
+ unsigned int size;
+
+ t = HDIF_get_iarray_item(pcia, SPPCIA_IDATA_THREAD_ARRAY,
+ i, &size);
+ if (!t || size < sizeof(*t))
+ continue;
+ if (be32_to_cpu(t->phys_thread_id) == thread)
+ return t;
+ }
+ return NULL;
+}
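+
+/*
+ * Note: the lookup above matches on phys_thread_id instead of assuming
+ * that array position equals thread number, hence the linear scan;
+ * callers such as add_icp() rely on this when walking threads 0..n-1.
+ */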
+
+static void add_icp(const void *pcia, u32 tcount, const char *compat)
+{
+ const struct sppcia_cpu_thread *t;
+ struct dt_node *icp;
+ __be64 *reg;
+ u32 i, irange[2], rsize;
+
+ rsize = tcount * 2 * sizeof(__be64);
+ reg = malloc(rsize);
+ assert(reg);
+
+ /* Suppresses uninitialized warning from gcc */
+ irange[0] = 0;
+ for (i = 0; i < tcount; i++) {
+ t = find_tada(pcia, i);
+ assert(t);
+ if (i == 0)
+ irange[0] = be32_to_cpu(t->proc_int_line);
+ reg[i * 2] = cpu_to_be64(cleanup_addr(be64_to_cpu(t->ibase)));
+ reg[i * 2 + 1] = cpu_to_be64(0x1000);
+ }
+ irange[1] = tcount;
+
+ icp = dt_new_addr(dt_root, "interrupt-controller", be64_to_cpu(reg[0]));
+ if (!icp) {
+ free(reg);
+ return;
+ }
+
+ if (compat)
+ dt_add_property_strings(icp, "compatible", "ibm,ppc-xicp", compat);
+ else
+ dt_add_property_strings(icp, "compatible", "ibm,ppc-xicp");
+ dt_add_property_cells(icp, "ibm,interrupt-server-ranges",
+ irange[0], irange[1]);
+ dt_add_property(icp, "interrupt-controller", NULL, 0);
+ dt_add_property(icp, "reg", reg, rsize);
+ dt_add_property_cells(icp, "#address-cells", 0);
+ dt_add_property_cells(icp, "#interrupt-cells", 1);
+ dt_add_property_string(icp, "device_type",
+ "PowerPC-External-Interrupt-Presentation");
+ free(reg);
+}
+
+static struct dt_node *add_core_node(struct dt_node *cpus,
+ const void *pcia,
+ const struct sppcia_core_unique *id,
+ bool okay)
+{
+ const struct sppcia_cpu_thread *t;
+ const struct sppcia_cpu_timebase *timebase;
+ const struct sppcia_cpu_cache *cache;
+ const struct sppcia_cpu_attr *attr;
+ struct dt_node *cpu;
+ const char *icp_compat;
+ u32 i, size, threads, ve_flags, l2_phandle, chip_id;
+ __be32 iserv[PCIA_MAX_THREADS];
+
+ /* Look for thread 0 */
+ t = find_tada(pcia, 0);
+ if (!t) {
+ prerror("CORE[%i]: Failed to find thread 0 !\n",
+ pcia_index(pcia));
+ return NULL;
+ }
+
+ ve_flags = be32_to_cpu(id->verif_exist_flags);
+ threads = ((ve_flags & CPU_ID_NUM_SECONDARY_THREAD_MASK)
+ >> CPU_ID_NUM_SECONDARY_THREAD_SHIFT) + 1;
+ assert(threads <= PCIA_MAX_THREADS);
+
+	printf("CORE[%i]: PIR=%i RES=%i %s %s(%u threads)\n",
+	       pcia_index(pcia), be32_to_cpu(t->pir),
+	       be32_to_cpu(t->proc_int_line),
+	       ve_flags & CPU_ID_PACA_RESERVED
+	       ? "**RESERVED**" : cpu_state(ve_flags),
+	       be32_to_cpu(t->pir) == boot_cpu->pir ? "[boot] " : "", threads);
+
+ timebase = HDIF_get_idata(pcia, SPPCIA_IDATA_TIMEBASE, &size);
+ if (!timebase || size < sizeof(*timebase)) {
+ prerror("CORE[%i]: bad timebase size %u @ %p\n",
+ pcia_index(pcia), size, timebase);
+ return NULL;
+ }
+
+ cache = HDIF_get_idata(pcia, SPPCIA_IDATA_CPU_CACHE, &size);
+ if (!cache || size < sizeof(*cache)) {
+ prerror("CORE[%i]: bad cache size %u @ %p\n",
+ pcia_index(pcia), size, cache);
+ return NULL;
+ }
+
+ cpu = add_core_common(cpus, cache, timebase,
+ be32_to_cpu(t->proc_int_line), okay);
+
+ /* Core attributes */
+ attr = HDIF_get_idata(pcia, SPPCIA_IDATA_CPU_ATTR, &size);
+ if (attr)
+ add_core_attr(cpu, be32_to_cpu(attr->attr));
+
+ /* Add cache info */
+ l2_phandle = add_core_cache_info(cpus, cache,
+ be32_to_cpu(t->proc_int_line), okay);
+ dt_add_property_cells(cpu, "l2-cache", l2_phandle);
+
+ if (proc_gen == proc_gen_p7)
+ icp_compat = "IBM,power7-icp";
+ else
+ icp_compat = "IBM,power8-icp";
+
+ /* Get HW Chip ID */
+ chip_id = pcid_to_chip_id(be32_to_cpu(id->proc_chip_id));
+
+ dt_add_property_cells(cpu, "ibm,pir", be32_to_cpu(t->pir));
+ dt_add_property_cells(cpu, "ibm,chip-id", chip_id);
+
+ /* Build ibm,ppc-interrupt-server#s with all threads */
+ for (i = 0; i < threads; i++) {
+ t = find_tada(pcia, i);
+ if (!t) {
+ threads = i;
+ break;
+ }
+ iserv[i] = t->proc_int_line;
+ assert(t->proc_int_line == t->pir);
+ }
+
+ dt_add_property(cpu, "ibm,ppc-interrupt-server#s", iserv, 4 * threads);
+
+ /* Add the ICP node for this CPU */
+ add_icp(pcia, threads, icp_compat);
+
+ return cpu;
+}
+
+bool pcia_parse(void)
+{
+ const void *pcia;
+ struct dt_node *cpus;
+ bool got_pcia = false;
+
+ /* Check PCIA exists... if not, maybe we are getting a PACA ? */
+ pcia = get_hdif(&spira.ntuples.pcia, "SPPCIA");
+ if (!pcia)
+ return false;
+
+ printf("Got PCIA !\n");
+
+ got_pcia = true;
+
+ cpus = dt_new(dt_root, "cpus");
+ dt_add_property_cells(cpus, "#address-cells", 1);
+ dt_add_property_cells(cpus, "#size-cells", 0);
+
+ for_each_pcia(pcia) {
+ const struct sppcia_core_unique *id;
+ u32 size, ve_flags;
+ bool okay;
+
+ id = HDIF_get_idata(pcia, SPPCIA_IDATA_CORE_UNIQUE, &size);
+ if (!id || size < sizeof(*id)) {
+ prerror("CORE[%i]: bad id size %u @ %p\n",
+ pcia_index(pcia), size, id);
+ return false;
+ }
+ ve_flags = be32_to_cpu(id->verif_exist_flags);
+
+ switch ((ve_flags & CPU_ID_VERIFY_MASK)
+ >> CPU_ID_VERIFY_SHIFT) {
+ case CPU_ID_VERIFY_USABLE_NO_FAILURES:
+ case CPU_ID_VERIFY_USABLE_FAILURES:
+ okay = true;
+ break;
+ default:
+ okay = false;
+ }
+
+ printf("CORE[%i]: HW_PROC_ID=%i PROC_CHIP_ID=%i EC=0x%x %s\n",
+ pcia_index(pcia), be32_to_cpu(id->hw_proc_id),
+ be32_to_cpu(id->proc_chip_id),
+ be32_to_cpu(id->chip_ec_level),
+ okay ? "OK" : "UNAVAILABLE");
+
+ if (!add_core_node(cpus, pcia, id, okay))
+ break;
+ }
+ return got_pcia;
+}
diff --git a/hdata/slca.c b/hdata/slca.c
new file mode 100644
index 0000000..a709aaa
--- /dev/null
+++ b/hdata/slca.c
@@ -0,0 +1,89 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <device.h>
+#include "spira.h"
+#include "hdata.h"
+
+const struct slca_entry *slca_get_entry(uint16_t slca_index)
+{
+ struct HDIF_common_hdr *slca_hdr;
+ int count;
+
+ slca_hdr = get_hdif(&spira.ntuples.slca, SLCA_HDIF_SIG);
+ if (!slca_hdr) {
+ prerror("SLCA Invalid\n");
+ return NULL;
+ }
+
+ count = HDIF_get_iarray_size(slca_hdr, SLCA_IDATA_ARRAY);
+ if (count < 0) {
+ prerror("SLCA: Can't find SLCA array size!\n");
+ return NULL;
+ }
+
+ if (slca_index < count) {
+ const struct slca_entry *s_entry;
+ unsigned int entry_sz;
+ s_entry = HDIF_get_iarray_item(slca_hdr, SLCA_IDATA_ARRAY,
+ slca_index, &entry_sz);
+
+ if (s_entry && entry_sz >= sizeof(*s_entry))
+ return s_entry;
+ } else
+ printf("SLCA: Can't find slca_entry for index %d\n", slca_index);
+ return NULL;
+}
+
+const char *slca_get_vpd_name(uint16_t slca_index)
+{
+ const struct slca_entry *s_entry;
+
+ s_entry = slca_get_entry(slca_index);
+ if (s_entry)
+ return (const char *)s_entry->fru_id;
+ else
+ printf("SLCA: Can't find fru_id for index %d\n", slca_index);
+ return NULL;
+}
+
+const char *slca_get_loc_code_index(uint16_t slca_index)
+{
+ const struct slca_entry *s_entry;
+
+ s_entry = slca_get_entry(slca_index);
+ if (s_entry)
+ return s_entry->loc_code;
+ else
+ printf("SLCA: Entry %d bad idata\n", slca_index);
+
+ return NULL;
+}
+
+void slca_vpd_add_loc_code(struct dt_node *node, uint16_t slca_index)
+{
+ const char *fru_loc_code;
+ char loc_code[LOC_CODE_SIZE + 1];
+
+ memset(loc_code, 0, sizeof(loc_code));
+ fru_loc_code = slca_get_loc_code_index(slca_index);
+ if (!fru_loc_code)
+ return;
+
+ strncpy(loc_code, fru_loc_code, LOC_CODE_SIZE);
+ dt_add_property(node, "ibm,loc-code", loc_code, strlen(loc_code) + 1);
+}
diff --git a/hdata/spira.c b/hdata/spira.c
new file mode 100644
index 0000000..39a78e8
--- /dev/null
+++ b/hdata/spira.c
@@ -0,0 +1,965 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <device.h>
+#include "spira.h"
+#include <cpu.h>
+#include <memory.h>
+#include <vpd.h>
+#include <interrupts.h>
+#include <ccan/str/str.h>
+#include <chip.h>
+#include <fsp-mdst-table.h>
+
+#include "hdata.h"
+#include "hostservices.h"
+
+/* Processor Initialization structure, contains
+ * the initial NIA and MSR values for the entry
+ * point
+ *
+ * Note: It appears to be ignoring the entry point
+ * and always going to 0x180
+ */
+
+static int cpu_type;
+
+__section(".procin.data") struct proc_init_data proc_init_data = {
+ .hdr = HDIF_SIMPLE_HDR("PROCIN", 1, struct proc_init_data),
+ .regs_ptr = HDIF_IDATA_PTR(offsetof(struct proc_init_data, regs), 0x10),
+ .regs = {
+ .nia = CPU_TO_BE64(0x180),
+ .msr = CPU_TO_BE64(0x9000000000000000ULL), /* SF | HV */
+ },
+};
+
+/* Populate MDST table
+ *
+ * Note that we only pass the sapphire console buffer here so that we
+ * can capture early failure logs. Later, the dump component
+ * (fsp_dump_mdst_init) creates a new table with all the memory sections
+ * we are interested in and sends the updated table to the FSP via MBOX.
+ *
+ * To help the FSP distinguish between TCE tokens and actual physical
+ * addresses, we set the top bit to 1 on physical addresses.
+ */
+#define ADDR_TOP_BIT (1ul << 63)
+
+__section(".mdst.data") struct dump_mdst_table init_mdst_table[2] = {
+ {
+ .addr = CPU_TO_BE64(INMEM_CON_START | ADDR_TOP_BIT),
+ .type = CPU_TO_BE32(DUMP_SECTION_CONSOLE),
+ .size = CPU_TO_BE32(INMEM_CON_LEN),
+ },
+ {
+ .addr = CPU_TO_BE64(HBRT_CON_START | ADDR_TOP_BIT),
+ .type = CPU_TO_BE32(DUMP_SECTION_HBRT_LOG),
+ .size = CPU_TO_BE32(HBRT_CON_LEN),
+ },
+};
+
+/* SP Interface Root Array, aka SPIRA */
+__section(".spira.data") struct spira spira = {
+ .hdr = HDIF_SIMPLE_HDR("SPIRA ", SPIRA_VERSION, struct spira),
+ .ntuples_ptr = HDIF_IDATA_PTR(offsetof(struct spira, ntuples),
+ sizeof(struct spira_ntuples)),
+ .ntuples = {
+ .array_hdr = {
+ .offset = CPU_TO_BE32(HDIF_ARRAY_OFFSET),
+ .ecnt = CPU_TO_BE32(SPIRA_NTUPLES_COUNT),
+ .esize
+ = CPU_TO_BE32(sizeof(struct spira_ntuple)),
+ .eactsz = CPU_TO_BE32(0x18),
+ },
+ /* We only populate some n-tuples */
+ .proc_init = {
+ .addr = CPU_TO_BE64(PROCIN_OFF),
+ .alloc_cnt = CPU_TO_BE16(1),
+ .act_cnt = CPU_TO_BE16(1),
+ .alloc_len
+ = CPU_TO_BE32(sizeof(struct proc_init_data)),
+ },
+ .heap = {
+ .addr = CPU_TO_BE64(SPIRA_HEAP_BASE),
+ .alloc_cnt = CPU_TO_BE16(1),
+ .alloc_len = CPU_TO_BE32(SPIRA_HEAP_SIZE),
+ },
+ .mdump_src = {
+ .addr = CPU_TO_BE64(MDST_TABLE_OFF),
+ .alloc_cnt = CPU_TO_BE16(ARRAY_SIZE(init_mdst_table)),
+ .act_cnt = CPU_TO_BE16(ARRAY_SIZE(init_mdst_table)),
+ .alloc_len =
+ CPU_TO_BE32(sizeof(init_mdst_table)),
+ },
+ },
+};
+
+/* Overridden for testing. */
+#ifndef spira_check_ptr
+bool spira_check_ptr(const void *ptr, const char *file, unsigned int line)
+{
+ if (!ptr)
+ return false;
+ if (((unsigned long)ptr) >= SPIRA_HEAP_BASE &&
+ ((unsigned long)ptr) < (SPIRA_HEAP_BASE + SPIRA_HEAP_SIZE))
+ return true;
+
+ prerror("SPIRA: Bad pointer %p at %s line %d\n", ptr, file, line);
+ return false;
+}
+#endif
+
+struct HDIF_common_hdr *__get_hdif(struct spira_ntuple *n, const char id[],
+ const char *file, int line)
+{
+ struct HDIF_common_hdr *h = ntuple_addr(n);
+ if (!spira_check_ptr(h, file, line))
+ return NULL;
+
+ if (!HDIF_check(h, id)) {
+ prerror("SPIRA: bad tuple %p: expected %s at %s line %d\n",
+ h, id, file, line);
+ return NULL;
+ }
+ return h;
+}
+
+static struct dt_node *add_xscom_node(uint64_t base, uint32_t hw_id,
+ uint32_t proc_chip_id)
+{
+ struct dt_node *node;
+ uint64_t addr, size;
+
+ addr = base | ((uint64_t)hw_id << PPC_BITLSHIFT(28));
+ size = (u64)1 << PPC_BITLSHIFT(28);
+
+ printf("XSCOM: Found HW ID 0x%x (PCID 0x%x) @ 0x%llx\n",
+ hw_id, proc_chip_id, (long long)addr);
+
+ node = dt_new_addr(dt_root, "xscom", addr);
+ if (!node)
+ return NULL;
+
+ dt_add_property_cells(node, "ibm,chip-id", hw_id);
+ dt_add_property_cells(node, "ibm,proc-chip-id", proc_chip_id);
+ dt_add_property_cells(node, "#address-cells", 1);
+ dt_add_property_cells(node, "#size-cells", 1);
+ dt_add_property(node, "scom-controller", NULL, 0);
+
+ switch(proc_gen) {
+ case proc_gen_p7:
+ dt_add_property_strings(node, "compatible",
+ "ibm,xscom", "ibm,power7-xscom");
+ break;
+ case proc_gen_p8:
+ dt_add_property_strings(node, "compatible",
+ "ibm,xscom", "ibm,power8-xscom");
+ break;
+ default:
+ dt_add_property_strings(node, "compatible", "ibm,xscom");
+ }
+ dt_add_property_u64s(node, "reg", addr, size);
+
+ return node;
+}
+
+struct dt_node *find_xscom_for_chip(uint32_t chip_id)
+{
+ struct dt_node *node;
+ uint32_t id;
+
+ dt_for_each_compatible(dt_root, node, "ibm,xscom") {
+ id = dt_get_chip_id(node);
+ if (id == chip_id)
+ return node;
+ }
+
+ return NULL;
+}
+
+static void add_psihb_node(struct dt_node *np)
+{
+ u32 psi_scom, psi_slen;
+ const char *psi_comp;
+
+ /*
+ * We add a few things under XSCOM that aren't added
+ * by any other HDAT path
+ */
+
+ /* PSI host bridge */
+ switch(proc_gen) {
+ case proc_gen_p7:
+ psi_scom = 0x2010c00;
+ psi_slen = 0x10;
+ psi_comp = "ibm,power7-psihb-x";
+ break;
+ case proc_gen_p8:
+ psi_scom = 0x2010900;
+ psi_slen = 0x20;
+ psi_comp = "ibm,power8-psihb-x";
+ break;
+ default:
+ psi_comp = NULL;
+ }
+ if (psi_comp) {
+ struct dt_node *psi_np;
+
+ psi_np = dt_new_addr(np, "psihb", psi_scom);
+ if (!psi_np)
+ return;
+
+ dt_add_property_cells(psi_np, "reg", psi_scom, psi_slen);
+ dt_add_property_strings(psi_np, "compatible", psi_comp,
+ "ibm,psihb-x");
+ }
+}
+
+static void add_xscom_add_pcia_assoc(struct dt_node *np, uint32_t pcid)
+{
+ const struct HDIF_common_hdr *hdr;
+ u32 size;
+
+
+ /*
+	 * The SPPCRD doesn't contain all the affinity data; we have
+	 * to dig it out of a core. I assume this is so that node
+	 * affinity can be different for groups of cores within the
+	 * chip, but for now we are going to ignore that.
+ */
+ hdr = get_hdif(&spira.ntuples.pcia, SPPCIA_HDIF_SIG);
+ if (!hdr)
+ return;
+
+ for_each_pcia(hdr) {
+ const struct sppcia_core_unique *id;
+
+ id = HDIF_get_idata(hdr, SPPCIA_IDATA_CORE_UNIQUE, &size);
+ if (!id || size < sizeof(*id))
+ continue;
+
+ if (be32_to_cpu(id->proc_chip_id) != pcid)
+ continue;
+
+ dt_add_property_cells(np, "ibm,ccm-node-id",
+ be32_to_cpu(id->ccm_node_id));
+ dt_add_property_cells(np, "ibm,hw-card-id",
+ be32_to_cpu(id->hw_card_id));
+ dt_add_property_cells(np, "ibm,hw-module-id",
+ be32_to_cpu(id->hw_module_id));
+ if (!dt_find_property(np, "ibm,dbob-id"))
+ dt_add_property_cells(np, "ibm,dbob-id",
+ be32_to_cpu(id->drawer_book_octant_blade_id));
+ dt_add_property_cells(np, "ibm,mem-interleave-scope",
+ be32_to_cpu(id->memory_interleaving_scope));
+ return;
+ }
+}
+
+static bool add_xscom_sppcrd(uint64_t xscom_base)
+{
+ const struct HDIF_common_hdr *hdif;
+ unsigned int i, vpd_sz;
+ const void *vpd;
+ struct dt_node *np;
+
+ for_each_ntuple_idx(&spira.ntuples.proc_chip, hdif, i,
+ SPPCRD_HDIF_SIG) {
+ const struct sppcrd_chip_info *cinfo;
+ u32 ve, version;
+
+ cinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_INFO, NULL);
+ if (!CHECK_SPPTR(cinfo)) {
+ prerror("XSCOM: Bad ChipID data %d\n", i);
+ continue;
+ }
+
+ ve = be32_to_cpu(cinfo->verif_exist_flags) & CHIP_VERIFY_MASK;
+ ve >>= CHIP_VERIFY_SHIFT;
+ if (ve == CHIP_VERIFY_NOT_INSTALLED ||
+ ve == CHIP_VERIFY_UNUSABLE)
+ continue;
+
+ /* Create the XSCOM node */
+ np = add_xscom_node(xscom_base,
+ be32_to_cpu(cinfo->xscom_id),
+ be32_to_cpu(cinfo->proc_chip_id));
+ if (!np)
+ continue;
+
+ version = be16_to_cpu(hdif->version);
+
+ /* Version 0A has additional OCC related stuff */
+ if (version >= 0x000a) {
+ if (!dt_find_property(np, "ibm,dbob-id"))
+ dt_add_property_cells(np, "ibm,dbob-id",
+ be32_to_cpu(cinfo->dbob_id));
+ dt_add_property_cells(np, "ibm,occ-functional-state",
+ be32_to_cpu(cinfo->occ_state));
+ }
+
+ /* Add chip VPD */
+ dt_add_vpd_node(hdif, SPPCRD_IDATA_FRU_ID, SPPCRD_IDATA_KW_VPD);
+
+ /* Add module VPD on version A and later */
+ if (version >= 0x000a) {
+ vpd = HDIF_get_idata(hdif, SPPCRD_IDATA_MODULE_VPD,
+ &vpd_sz);
+ if (CHECK_SPPTR(vpd))
+ dt_add_property(np, "ibm,module-vpd", vpd,
+ vpd_sz);
+ }
+
+ /*
+ * Extract additional associativity information from
+ * the core data. Pick one core on that chip
+ */
+ add_xscom_add_pcia_assoc(np, be32_to_cpu(cinfo->proc_chip_id));
+
+ /* Add PSI Host bridge */
+ add_psihb_node(np);
+ }
+
+ return i > 0;
+}
+
+static void add_xscom_sppaca(uint64_t xscom_base)
+{
+ const struct HDIF_common_hdr *hdif;
+ unsigned int i;
+ struct dt_node *np;
+
+ for_each_ntuple_idx(&spira.ntuples.paca, hdif, i, PACA_HDIF_SIG) {
+ const struct sppaca_cpu_id *id;
+ unsigned int chip_id, size;
+ int ve;
+
+		/* We only support old style PACA on P7 ! */
+ assert(proc_gen == proc_gen_p7);
+
+ id = HDIF_get_idata(hdif, SPPACA_IDATA_CPU_ID, &size);
+
+ if (!CHECK_SPPTR(id)) {
+ prerror("XSCOM: Bad processor data %d\n", i);
+ continue;
+ }
+
+ ve = be32_to_cpu(id->verify_exists_flags) & CPU_ID_VERIFY_MASK;
+ ve >>= CPU_ID_VERIFY_SHIFT;
+ if (ve == CPU_ID_VERIFY_NOT_INSTALLED ||
+ ve == CPU_ID_VERIFY_UNUSABLE)
+ continue;
+
+ /* Convert to HW chip ID */
+ chip_id = P7_PIR2GCID(be32_to_cpu(id->pir));
+
+ /* do we already have an XSCOM for this chip? */
+ if (find_xscom_for_chip(chip_id))
+ continue;
+
+ /* Create the XSCOM node */
+ np = add_xscom_node(xscom_base, chip_id,
+ be32_to_cpu(id->processor_chip_id));
+ if (!np)
+ continue;
+
+ /* Add chip VPD */
+ dt_add_vpd_node(hdif, SPPACA_IDATA_FRU_ID, SPPACA_IDATA_KW_VPD);
+
+ /* Add chip associativity data */
+ dt_add_property_cells(np, "ibm,ccm-node-id",
+ be32_to_cpu(id->ccm_node_id));
+ if (size > SPIRA_CPU_ID_MIN_SIZE) {
+ dt_add_property_cells(np, "ibm,hw-card-id",
+ be32_to_cpu(id->hw_card_id));
+ dt_add_property_cells(np, "ibm,hw-module-id",
+ be32_to_cpu(id->hardware_module_id));
+ if (!dt_find_property(np, "ibm,dbob-id"))
+ dt_add_property_cells(np, "ibm,dbob-id",
+ be32_to_cpu(id->drawer_book_octant_blade_id));
+ dt_add_property_cells(np, "ibm,mem-interleave-scope",
+ be32_to_cpu(id->memory_interleaving_scope));
+ }
+
+ /* Add PSI Host bridge */
+ add_psihb_node(np);
+ }
+}
+
+static void add_xscom(void)
+{
+ const void *ms_vpd;
+ const struct msvpd_pmover_bsr_synchro *pmbs;
+ unsigned int size;
+ uint64_t xscom_base;
+
+ ms_vpd = get_hdif(&spira.ntuples.ms_vpd, MSVPD_HDIF_SIG);
+ if (!ms_vpd) {
+ prerror("XSCOM: Can't find MS VPD\n");
+ return;
+ }
+
+ pmbs = HDIF_get_idata(ms_vpd, MSVPD_IDATA_PMOVER_SYNCHRO, &size);
+ if (!CHECK_SPPTR(pmbs) || size < sizeof(*pmbs)) {
+ prerror("XSCOM: absent or bad PMBS size %u @ %p\n", size, pmbs);
+ return;
+ }
+
+ if (!(be32_to_cpu(pmbs->flags) & MSVPD_PMS_FLAG_XSCOMBASE_VALID)) {
+ prerror("XSCOM: No XSCOM base in PMBS, using default\n");
+ return;
+ }
+
+ xscom_base = be64_to_cpu(pmbs->xscom_addr);
+
+	/* Some FSPs (on P7) give us a bogus base address for XSCOM (it has
+	 * spurious bits set as far as we can tell). Since only the 5 bits
+	 * 18:22 can be programmed in hardware, let's isolate these. This
+	 * seems to give the right value on VPL1.
+	 */
+ if (cpu_type == PVR_TYPE_P7)
+ xscom_base &= 0x80003e0000000000ul;
+
+ /* Get rid of the top bits */
+ xscom_base = cleanup_addr(xscom_base);
+
+ /* First, try the new proc_chip ntuples for chip data */
+ if (add_xscom_sppcrd(xscom_base))
+ return;
+
+ /* Otherwise, check the old-style PACA, looking for unique chips */
+ add_xscom_sppaca(xscom_base);
+}
+
+static void add_chiptod_node(unsigned int chip_id, int flags)
+{
+ struct dt_node *node, *xscom_node;
+ const char *compat_str;
+ uint32_t addr, len;
+
+ if ((flags & CHIPTOD_ID_FLAGS_STATUS_MASK) !=
+ CHIPTOD_ID_FLAGS_STATUS_OK)
+ return;
+
+ xscom_node = find_xscom_for_chip(chip_id);
+ if (!xscom_node) {
+ prerror("CHIPTOD: No xscom for chiptod %d?\n", chip_id);
+ return;
+ }
+
+ addr = 0x40000;
+ len = 0x34;
+
+ switch(proc_gen) {
+ case proc_gen_p7:
+ compat_str = "ibm,power7-chiptod";
+ break;
+ case proc_gen_p8:
+ compat_str = "ibm,power8-chiptod";
+ break;
+ default:
+ return;
+ }
+
+ printf("CHIPTOD: Found on chip 0x%x %s\n", chip_id,
+ (flags & CHIPTOD_ID_FLAGS_PRIMARY) ? "[primary]" :
+ ((flags & CHIPTOD_ID_FLAGS_SECONDARY) ? "[secondary]" : ""));
+
+ node = dt_new_addr(xscom_node, "chiptod", addr);
+ if (!node)
+ return;
+
+ dt_add_property_cells(node, "reg", addr, len);
+ dt_add_property_strings(node, "compatible", "ibm,power-chiptod",
+ compat_str);
+
+ if (flags & CHIPTOD_ID_FLAGS_PRIMARY)
+ dt_add_property(node, "primary", NULL, 0);
+ if (flags & CHIPTOD_ID_FLAGS_SECONDARY)
+ dt_add_property(node, "secondary", NULL, 0);
+}
+
+static bool add_chiptod_old(void)
+{
+ const void *hdif;
+ unsigned int i;
+ bool found = false;
+
+ /*
+ * Locate chiptod ID structures in SPIRA
+ */
+ if (!get_hdif(&spira.ntuples.chip_tod, "TOD "))
+ return found;
+
+ for_each_ntuple_idx(&spira.ntuples.chip_tod, hdif, i, "TOD ") {
+ const struct chiptod_chipid *id;
+
+ id = HDIF_get_idata(hdif, CHIPTOD_IDATA_CHIPID, NULL);
+ if (!CHECK_SPPTR(id)) {
+ prerror("CHIPTOD: Bad ChipID data %d\n", i);
+ continue;
+ }
+
+ add_chiptod_node(pcid_to_chip_id(be32_to_cpu(id->chip_id)),
+ be32_to_cpu(id->flags));
+ found = true;
+ }
+ return found;
+}
+
+static bool add_chiptod_new(uint32_t master_cpu)
+{
+ const void *hdif;
+ unsigned int i, master_chip;
+ bool found = false;
+
+ /*
+ * Locate Proc Chip ID structures in SPIRA
+ */
+ if (!get_hdif(&spira.ntuples.proc_chip, SPPCRD_HDIF_SIG))
+ return found;
+
+ master_chip = pir_to_chip_id(master_cpu);
+
+ for_each_ntuple_idx(&spira.ntuples.proc_chip, hdif, i,
+ SPPCRD_HDIF_SIG) {
+ const struct sppcrd_chip_info *cinfo;
+ const struct sppcrd_chip_tod *tinfo;
+ unsigned int size;
+ u32 ve, flags;
+
+ cinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_INFO, NULL);
+ if (!CHECK_SPPTR(cinfo)) {
+ prerror("CHIPTOD: Bad ChipID data %d\n", i);
+ continue;
+ }
+
+ ve = be32_to_cpu(cinfo->verif_exist_flags) & CHIP_VERIFY_MASK;
+ ve >>= CHIP_VERIFY_SHIFT;
+ if (ve == CHIP_VERIFY_NOT_INSTALLED ||
+ ve == CHIP_VERIFY_UNUSABLE)
+ continue;
+
+ tinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_TOD, &size);
+ if (!CHECK_SPPTR(tinfo)) {
+ prerror("CHIPTOD: Bad TOD data %d\n", i);
+ continue;
+ }
+
+ flags = be32_to_cpu(tinfo->flags);
+
+ /* The FSP may strip the chiptod info from HDAT; if we find
+		 * a zeroed-out entry, assume that the chiptod is
+ * present, but we don't have any primary/secondary info. In
+ * this case, pick the primary based on the CPU that was
+ * assigned master.
+ */
+ if (!size) {
+ flags = CHIPTOD_ID_FLAGS_STATUS_OK;
+ if (be32_to_cpu(cinfo->xscom_id) == master_chip)
+ flags |= CHIPTOD_ID_FLAGS_PRIMARY;
+ }
+
+ add_chiptod_node(be32_to_cpu(cinfo->xscom_id), flags);
+ found = true;
+ }
+ return found;
+}
+
+static void add_nx_node(u32 gcid)
+{
+ struct dt_node *nx;
+ const char *cp_str;
+ u32 addr;
+ u32 size;
+ struct dt_node *xscom;
+
+ xscom = find_xscom_for_chip(gcid);
+ if (xscom == NULL) {
+		prerror("NX%d: could not find xscom node.\n", gcid);
+ return;
+ }
+
+ /*
+	 * The NX register space is relatively self-contained on P7+ but
+	 * a bit messier on P8. However, it's all contained within the
+ * PB chiplet port 1 so we'll stick to that in the "reg" property
+ * and let the NX "driver" deal with the details.
+ */
+ addr = 0x2010000;
+ size = 0x0004000;
+
+ switch (proc_gen) {
+ case proc_gen_p7:
+ cp_str = "ibm,power7-nx";
+ break;
+ case proc_gen_p8:
+ cp_str = "ibm,power8-nx";
+ break;
+ default:
+ return;
+ }
+ nx = dt_new_addr(xscom, "nx", addr);
+ if (!nx)
+ return;
+
+ dt_add_property_cells(nx, "reg", addr, size);
+ dt_add_property_strings(nx, "compatible", "ibm,power-nx", cp_str);
+}
+
+static void add_nx(void)
+{
+ unsigned int i;
+ void *hdif;
+
+ for_each_ntuple_idx(&spira.ntuples.proc_chip, hdif, i,
+ SPPCRD_HDIF_SIG) {
+ const struct sppcrd_chip_info *cinfo;
+ u32 ve;
+
+ cinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_INFO, NULL);
+ if (!CHECK_SPPTR(cinfo)) {
+ prerror("NX: Bad ChipID data %d\n", i);
+ continue;
+ }
+
+ ve = be32_to_cpu(cinfo->verif_exist_flags) & CHIP_VERIFY_MASK;
+ ve >>= CHIP_VERIFY_SHIFT;
+ if (ve == CHIP_VERIFY_NOT_INSTALLED ||
+ ve == CHIP_VERIFY_UNUSABLE)
+ continue;
+
+ if (cinfo->nx_state)
+ add_nx_node(be32_to_cpu(cinfo->xscom_id));
+ }
+}
+
+
+static void add_iplparams_sys_params(const void *iplp, struct dt_node *node)
+{
+ const struct iplparams_sysparams *p;
+ u32 sys_type;
+ const char *sys_family;
+
+ p = HDIF_get_idata(iplp, IPLPARAMS_SYSPARAMS, NULL);
+ if (!CHECK_SPPTR(p)) {
+ prerror("IPLPARAMS: No SYS Parameters\n");
+ /* Create a generic compatible property */
+ dt_add_property_string(dt_root, "compatible", "ibm,powernv");
+ return;
+ }
+
+ node = dt_new(node, "sys-params");
+ assert(node);
+ dt_add_property_cells(node, "#address-cells", 0);
+ dt_add_property_cells(node, "#size-cells", 0);
+
+ dt_add_property_nstr(node, "ibm,sys-model", p->sys_model, 4);
+
+	/* Compatible is 2 entries: ibm,powernv and ibm,<platform> */
+ sys_type = be32_to_cpu(p->system_type);
+ switch(sys_type >> 28) {
+ case 0:
+ sys_family = "ibm,squadrons";
+ break;
+ case 1:
+ sys_family = "ibm,eclipz";
+ break;
+ case 2:
+ sys_family = "ibm,apollo";
+ break;
+ case 3:
+ sys_family = "ibm,firenze";
+ break;
+ default:
+ sys_family = NULL;
+ prerror("IPLPARAMS: Unknown system family\n");
+ break;
+ }
+ dt_add_property_strings(dt_root, "compatible", "ibm,powernv",
+ sys_family);
+}
+
+static void add_iplparams_ipl_params(const void *iplp, struct dt_node *node)
+{
+ const struct iplparams_iplparams *p;
+
+ p = HDIF_get_idata(iplp, IPLPARAMS_IPLPARAMS, NULL);
+ if (!CHECK_SPPTR(p)) {
+ prerror("IPLPARAMS: No IPL Parameters\n");
+ return;
+ }
+
+ node = dt_new(node, "ipl-params");
+ assert(node);
+ dt_add_property_cells(node, "#address-cells", 0);
+ dt_add_property_cells(node, "#size-cells", 0);
+
+ /* On an ASM initiated factory reset, this bit will be set
+ * and the FSP expects the firmware to reset the PCI bus
+ * numbers and respond with a Power Down (CE,4D,02) message
+ */
+ if (p->other_attrib & IPLPARAMS_OATTR_RST_PCI_BUSNO)
+ dt_add_property_cells(node, "pci-busno-reset-ipl", 1);
+ dt_add_property_strings(node, "cec-ipl-side",
+ (p->ipl_side & IPLPARAMS_CEC_FW_IPL_SIDE_TEMP) ?
+ "temp" : "perm");
+ dt_add_property_strings(node, "fsp-ipl-side",
+ (p->ipl_side & IPLPARAMS_FSP_FW_IPL_SIDE_TEMP) ?
+ "temp" : "perm");
+ dt_add_property_cells(node, "os-ipl-mode", p->os_ipl_mode);
+ dt_add_property_strings(node, "cec-major-type",
+ p->cec_ipl_maj_type ? "hot" : "cold");
+}
+
+static void add_iplparams_serials(const void *iplp, struct dt_node *node)
+{
+ const struct iplparms_serial *ipser;
+ struct dt_node *ser_node;
+ int count, i;
+
+ count = HDIF_get_iarray_size(iplp, IPLPARMS_IDATA_SERIAL);
+ if (!count) {
+ prerror("IPLPARAMS: No serial ports\n");
+ return;
+ }
+ prerror("IPLPARAMS: %d serial ports in array\n", count);
+
+ node = dt_new(node, "fsp-serial");
+ assert(node);
+ dt_add_property_cells(node, "#address-cells", 1);
+ dt_add_property_cells(node, "#size-cells", 0);
+
+ for (i = 0; i < count; i++) {
+ u16 rsrc_id;
+ ipser = HDIF_get_iarray_item(iplp, IPLPARMS_IDATA_SERIAL,
+ i, NULL);
+ if (!CHECK_SPPTR(ipser))
+ continue;
+ rsrc_id = be16_to_cpu(ipser->rsrc_id);
+ printf("IPLPARAMS: Serial %d rsrc: %04x loc: %s\n",
+ i, rsrc_id, ipser->loc_code);
+ ser_node = dt_new_addr(node, "serial", rsrc_id);
+ if (!ser_node)
+ continue;
+
+ dt_add_property_cells(ser_node, "reg", rsrc_id);
+ dt_add_property_nstr(ser_node, "ibm,loc-code",
+ ipser->loc_code, LOC_CODE_SIZE);
+ dt_add_property_string(ser_node, "compatible",
+ "ibm,fsp-serial");
+ /* XXX handle CALLHOME flag ? */
+ }
+}
+
+/*
+ * Check for platform dump, if present populate DT
+ */
+static void add_iplparams_platform_dump(const void *iplp, struct dt_node *node)
+{
+ const struct iplparams_dump *ipl_dump;
+
+ ipl_dump = HDIF_get_idata(iplp, IPLPARAMS_PLATFORM_DUMP, NULL);
+ if (!CHECK_SPPTR(ipl_dump))
+ return;
+
+ node = dt_new(node, "platform-dump");
+ assert(node);
+
+ if (be32_to_cpu(ipl_dump->dump_id)) {
+ dt_add_property_cells(node, "dump-id",
+ be32_to_cpu(ipl_dump->dump_id));
+ dt_add_property_u64(node, "total-size",
+ be64_to_cpu(ipl_dump->act_dump_sz));
+ dt_add_property_u64(node, "hw-dump-size",
+ be32_to_cpu(ipl_dump->act_hw_dump_sz));
+ dt_add_property_cells(node, "plog-id",
+ be32_to_cpu(ipl_dump->plid));
+ }
+}
+
+static void add_iplparams(void)
+{
+ struct dt_node *iplp_node;
+ const void *ipl_parms;
+
+ ipl_parms = get_hdif(&spira.ntuples.ipl_parms, "IPLPMS");
+ if (!ipl_parms) {
+ prerror("IPLPARAMS: Cannot find IPL Parms in SPIRA\n");
+ return;
+ }
+
+ iplp_node = dt_new(dt_root, "ipl-params");
+ assert(iplp_node);
+ dt_add_property_cells(iplp_node, "#address-cells", 0);
+ dt_add_property_cells(iplp_node, "#size-cells", 0);
+
+ add_iplparams_sys_params(ipl_parms, iplp_node);
+ add_iplparams_ipl_params(ipl_parms, iplp_node);
+ add_iplparams_serials(ipl_parms, iplp_node);
+ add_iplparams_platform_dump(ipl_parms, iplp_node);
+}
+
+/* Various structures contain a "proc_chip_id" which is an arbitrary
+ * numbering used by HDAT to reference chips, which doesn't correspond
+ * to the HW IDs. We want to use the HW IDs everywhere in the DT so
+ * we convert using this.
+ *
+ * Note: On P7, the HW ID is the XSCOM "GCID" including the T bit which
+ * is *different* from the chip ID portion of the interrupt server#
+ * (or PIR). See the explanations in chip.h
+ */
+uint32_t pcid_to_chip_id(uint32_t proc_chip_id)
+{
+ unsigned int i;
+ const void *hdif;
+
+ /* First, try the proc_chip ntuples for chip data */
+ for_each_ntuple_idx(&spira.ntuples.proc_chip, hdif, i,
+ SPPCRD_HDIF_SIG) {
+ const struct sppcrd_chip_info *cinfo;
+
+ cinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_INFO,
+ NULL);
+ if (!CHECK_SPPTR(cinfo)) {
+ prerror("XSCOM: Bad ChipID data %d\n", i);
+ continue;
+ }
+ if (proc_chip_id == be32_to_cpu(cinfo->proc_chip_id))
+ return be32_to_cpu(cinfo->xscom_id);
+ }
+
+ /* Otherwise, check the old-style PACA, looking for unique chips */
+ for_each_ntuple_idx(&spira.ntuples.paca, hdif, i, PACA_HDIF_SIG) {
+ const struct sppaca_cpu_id *id;
+
+		/* We only support old style PACA on P7 ! */
+ assert(proc_gen == proc_gen_p7);
+
+ id = HDIF_get_idata(hdif, SPPACA_IDATA_CPU_ID, NULL);
+
+ if (!CHECK_SPPTR(id)) {
+ prerror("XSCOM: Bad processor data %d\n", i);
+ continue;
+ }
+
+ if (proc_chip_id == be32_to_cpu(id->processor_chip_id))
+ return P7_PIR2GCID(be32_to_cpu(id->pir));
+ }
+
+ /* Not found, what to do ? Assert ? For now return a number
+	 * guaranteed not to exist
+ */
+ return (uint32_t)-1;
+}
+
+static void dt_init_vpd_node(void)
+{
+ struct dt_node *dt_vpd;
+
+ dt_vpd = dt_new(dt_root, "vpd");
+ assert(dt_vpd);
+}
+
+static void hostservices_parse(void)
+{
+ struct HDIF_common_hdr *hs_hdr;
+ const void *dt_blob;
+ unsigned int size;
+ unsigned int ntuples_size;
+
+ ntuples_size = sizeof(struct HDIF_array_hdr) +
+ be32_to_cpu(spira.ntuples.array_hdr.ecnt) *
+ sizeof(struct spira_ntuple);
+
+ if (offsetof(struct spira_ntuples, hs_data) >= ntuples_size) {
+ prerror("SPIRA: No host services data found\n");
+ return;
+ }
+
+ hs_hdr = get_hdif(&spira.ntuples.hs_data, HSERV_HDIF_SIG);
+ if (!hs_hdr) {
+ prerror("SPIRA: No host services data found\n");
+ return;
+ }
+
+ dt_blob = HDIF_get_idata(hs_hdr, 0, &size);
+ if (!dt_blob) {
+ prerror("SPIRA: No host services idata found\n");
+ return;
+ }
+ hservices_from_hdat(dt_blob, size);
+}
+
+void parse_hdat(bool is_opal, uint32_t master_cpu)
+{
+ cpu_type = PVR_TYPE(mfspr(SPR_PVR));
+
+ printf("\n");
+ printf("-----------------------------------------------\n");
+ printf("-------------- Parsing HDAT ... ---------------\n");
+ printf("-----------------------------------------------\n");
+ printf("\n");
+
+ dt_root = dt_new_root("");
+
+ /*
+ * Basic DT root stuff
+ */
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+ dt_add_property_string(dt_root, "lid-type", is_opal ? "opal" : "phyp");
+
+ /* Create /vpd node */
+ dt_init_vpd_node();
+
+ /* Parse SPPACA and/or PCIA */
+ if (!pcia_parse())
+ paca_parse();
+
+ /* IPL params */
+ add_iplparams();
+
+ /* Parse MS VPD */
+ memory_parse();
+
+ /* Add XSCOM node (must be before chiptod & IO ) */
+ add_xscom();
+
+ /* Add FSP */
+ fsp_parse();
+
+ /* Add ChipTOD's */
+ if (!add_chiptod_old() && !add_chiptod_new(master_cpu))
+ prerror("CHIPTOD: No ChipTOD found !\n");
+
+ /* Add NX */
+ add_nx();
+
+ /* Add IO HUBs and/or PHBs */
+ io_parse();
+
+ /* Parse VPD */
+ vpd_parse();
+
+ /* Host services information. */
+ hostservices_parse();
+
+ printf("\n");
+ printf("-----------------------------------------------\n");
+ printf("\n");
+}
diff --git a/hdata/spira.h b/hdata/spira.h
new file mode 100644
index 0000000..93239d0
--- /dev/null
+++ b/hdata/spira.h
@@ -0,0 +1,864 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SPIRA_H
+#define __SPIRA_H
+
+#include "hdif.h"
+
+/*
+ * The SPIRA structure
+ *
+ * NOTE: This is one of the only HDIF structures that we lay out entirely
+ * as a C struct because it's provided by us to the FSP. Almost everything
+ * else is generated by the FSP, and thus must be "parsed" since the various
+ * offsets and alignments might change.
+ */
+
+#define SPIRA_VERSION 0x20 /* Like 730 ? */
+
+struct spira_ntuple {
+ __be64 addr;
+ __be16 alloc_cnt;
+ __be16 act_cnt;
+ __be32 alloc_len;
+ __be32 act_len;
+ __be32 tce_off;
+ __be64 padding;
+} __packed;
+
+#define SPIRA_NTUPLES_COUNT 0x18
+
+struct spira_ntuples {
+ struct HDIF_array_hdr array_hdr;
+ struct spira_ntuple sp_subsys; /* 0x040 */
+ struct spira_ntuple ipl_parms; /* 0x060 */
+ struct spira_ntuple nt_enclosure_vpd; /* 0x080 */
+ struct spira_ntuple slca; /* 0x0a0 */
+ struct spira_ntuple backplane_vpd; /* 0x0c0 */
+ struct spira_ntuple system_vpd; /* 0x0e0 */
+ struct spira_ntuple chip_tod; /* 0x100 */
+ struct spira_ntuple proc_init; /* 0x120 */
+ struct spira_ntuple clock_vpd; /* 0x140 */
+ struct spira_ntuple anchor_vpd; /* 0x160 */
+ struct spira_ntuple op_panel_vpd; /* 0x180 */
+ struct spira_ntuple ext_cache_fru_vpd; /* 0x1a0 */
+ struct spira_ntuple misc_cec_fru_vpd; /* 0x1c0 */
+ struct spira_ntuple paca; /* 0x1e0 */
+ struct spira_ntuple ms_vpd; /* 0x200 */
+ struct spira_ntuple cec_iohub_fru; /* 0x220 */
+ struct spira_ntuple cpu_ctrl; /* 0x240 */
+ struct spira_ntuple mdump_src; /* 0x260 */
+ struct spira_ntuple mdump_dst; /* 0x280 */
+ struct spira_ntuple mdump_res; /* 0x2a0 */
+ struct spira_ntuple heap; /* 0x2c0 */
+ struct spira_ntuple pcia; /* 0x2e0 */
+ struct spira_ntuple proc_chip; /* 0x300 */
+ struct spira_ntuple hs_data; /* 0x320 */
+};
+
+struct spira {
+ struct HDIF_common_hdr hdr;
+ struct HDIF_idata_ptr ntuples_ptr;
+ __be64 pad;
+ struct spira_ntuples ntuples;
+ u8 reserved[0x4c0];
+} __packed __align(0x100);
+
+extern struct spira spira;
+
+/* This macro can be used to check the validity of a pointer returned
+ * by one of the HDIF API functions. It returns true if the pointer
+ * appears valid. If it's not valid and not NULL, it will print some
+ * error in the log as well.
+ */
+#define CHECK_SPPTR(_ptr) spira_check_ptr(_ptr, __FILE__, __LINE__)
+
+#define get_hdif(ntuple, id) __get_hdif((ntuple), (id), __FILE__, __LINE__)
+
+extern struct HDIF_common_hdr *__get_hdif(struct spira_ntuple *n,
+ const char id[],
+ const char *file, int line);
+
+#define for_each_ntuple_idx(_ntuples, _p, _idx, _id) \
+ for (_p = get_hdif((_ntuples), _id ""), _idx = 0; \
+ _p && _idx < be16_to_cpu((_ntuples)->act_cnt); \
+ _p = (void *)_p + be32_to_cpu((_ntuples)->alloc_len), _idx++)
+
+#define for_each_ntuple(_ntuples, _p, _id) \
+ for (_p = get_hdif((_ntuples), _id ""); \
+ _p && (void *)_p < ntuple_addr(_ntuples) \
+ + (be16_to_cpu((_ntuples)->act_cnt) * \
+ be32_to_cpu((_ntuples)->alloc_len)); \
+ _p = (void *)_p + be32_to_cpu((_ntuples)->alloc_len))
+
+#define for_each_paca(p) for_each_ntuple(&spira.ntuples.paca, p, PACA_HDIF_SIG)
+
+#define for_each_pcia(p) for_each_ntuple(&spira.ntuples.pcia, p, SPPCIA_HDIF_SIG)
+
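+/*
+ * Typical usage, a sketch mirroring the consumers in hdata/spira.c
+ * (e.g. add_xscom_sppcrd()):
+ *
+ *	const void *hdif;
+ *	unsigned int i;
+ *
+ *	for_each_ntuple_idx(&spira.ntuples.proc_chip, hdif, i,
+ *			    SPPCRD_HDIF_SIG) {
+ *		const struct sppcrd_chip_info *cinfo;
+ *
+ *		cinfo = HDIF_get_idata(hdif, SPPCRD_IDATA_CHIP_INFO, NULL);
+ *		if (!CHECK_SPPTR(cinfo))
+ *			continue;
+ *		... use be32_to_cpu(cinfo->xscom_id) etc ...
+ *	}
+ *
+ * get_hdif() checks the tuple signature, so the loop body only ever
+ * sees entries whose header matched _id.
+ */
+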
+
+/* We override these for testing. */
+#ifndef ntuple_addr
+#define ntuple_addr(_ntuples) ((void *)BE64_TO_CPU((_ntuples)->addr))
+#endif
+
+#ifndef spira_check_ptr
+extern bool spira_check_ptr(const void *ptr, const char *file,
+ unsigned int line);
+#endif
+
+struct proc_init_data {
+ struct HDIF_common_hdr hdr;
+ struct HDIF_idata_ptr regs_ptr;
+ struct {
+ __be64 nia;
+ __be64 msr;
+ } regs;
+} __packed __align(0x10);
+
+/*
+ * The FRU ID structure is used in several tuples, so we
+ * define it generically here
+ */
+struct spira_fru_id {
+ __be16 slca_index;
+ __be16 rsrc_id; /* formerly VPD port number */
+};
+
+/*
+ * The FRU operational status structure is used in several
+ * tuples, so we define it generically here
+ */
+struct spira_fru_op_status {
+ uint8_t flags;
+#define FRU_OP_STATUS_FLAG_USED 0x02 /* If 0 -> not used (redundant) */
+#define FRU_OP_STATUS_FLAG_FUNCTIONAL 0x01 /* If 0 -> non-functional */
+ uint8_t reserved[3];
+};
+
+/*
+ * Move VPD related stuff to another file ...
+ */
+#define VPD_ID(_a, _b) ((_a) << 8 | (_b))
+
+/*
+ * Service Processor Subsystem Structure
+ *
+ * This structure contains several internal data blocks
+ * describing the service processor(s) in the system
+ */
+
+#define SPSS_HDIF_SIG "SPINFO"
+
+/* Idata index 0 : FRU ID Data */
+#define SPSS_IDATA_FRU_ID 0
+
+/* Idata index 1 : Keyword VPD for the FSP instance */
+#define SPSS_IDATA_KEYWORD_VPD 1
+
+/* Idata index 2 : SP Implementation */
+#define SPSS_IDATA_SP_IMPL 2
+
+struct spss_sp_impl {
+ __be16 hw_version;
+ __be16 sw_version;
+ __be16 func_flags;
+#define SPSS_SP_IMPL_FLAGS_INSTALLED 0x8000
+#define SPSS_SP_IMPL_FLAGS_FUNCTIONAL 0x4000
+#define SPSS_SP_IMPL_FLAGS_PRIMARY 0x2000
+ u8 chip_version;
+ u8 reserved;
+};
+
+/* Idata index 3 is deprecated */
+
+/* Idata index 4 : SP Memory Locator */
+#define SPSS_IDATA_SP_MEMLOC 4
+
+/* Idata index 5 : SP I/O path array */
+#define SPSS_IDATA_SP_IOPATH 5
+
+/* An HDIF array of IO path */
+struct spss_iopath {
+ __be16 iopath_type;
+#define SPSS_IOPATH_TYPE_IOHUB_PHB 0x0001
+#define SPSS_IOPATH_TYPE_PSI 0x0002
+ union {
+ struct {
+ __be16 iohub_chip_inst;
+ __be16 iohub_chip_port;
+ __be16 phb_id;
+ } __packed iohub_phb;
+
+ struct {
+ __be16 link_status;
+#define SPSS_IO_PATH_PSI_LINK_BAD_FRU 0x0000
+#define SPSS_IO_PATH_PSI_LINK_CURRENT 0x0001
+#define SPSS_IO_PATH_PSI_LINK_BACKUP 0x0002
+ u8 ml2_version;
+ u8 reserved;
+ __be16 slca_count;
+ u8 slca_idx[16];
+ __be32 proc_chip_id;
+ __be32 reserved2;
+ __be64 gxhb_base;
+ } __packed psi;
+ };
+} __packed;
+
+/*
+ * IPL Parms structure
+ *
+ */
+
+/* Idata index 0: System Parameters */
+#define IPLPARAMS_SYSPARAMS 0
+
+struct iplparams_sysparams {
+ char sys_model[4];
+ char cpu_feature_code[4];
+ __be32 effective_pvr;
+ __be32 system_type;
+ uint8_t num_lpar_oct[8];
+ __be32 abc_bus_speed;
+ __be32 wxyz_bus_speed;
+ __be32 sys_eco_mode;
+ __be32 sys_attributes;
+ __be32 mem_scrubbing;
+ __be16 cur_spl_value;
+ uint8_t pump_mode;
+ uint8_t use_pore_sleep;
+ __be32 pore_image_size;
+} __packed;
+
+/* Idata index 1: IPL parameters */
+#define IPLPARAMS_IPLPARAMS 1
+
+struct iplparams_iplparams {
+ uint8_t reserved;
+ uint8_t hv_ipl_dest;
+ uint8_t ipl_side;
+#define IPLPARAMS_CEC_FW_IPL_SIDE_TEMP 0x10
+#define IPLPARAMS_FSP_FW_IPL_SIDE_TEMP 0x01
+ uint8_t ipl_speed;
+ __be16 cec_ipl_attrib;
+ uint8_t cec_ipl_maj_type;
+ uint8_t cec_ipl_min_type;
+ uint8_t os_ipl_mode;
+ uint8_t keylock_pos;
+ uint8_t lmb_size;
+ uint8_t deprecated;
+ __be32 max_hsl_opticonnect;
+ __be32 other_attrib;
+#define IPLPARAMS_OATTR_RST_PCI_BUSNO 0x08000000
+#define IPLPARAMS_OATTR_CLEAR_NVRAM 0x04000000
+ __be16 huge_page_count;
+ uint8_t huge_page_size;
+#define IPLPARAMS_HUGE_PG_SIZE_16G 0
+ uint8_t num_vlan_switches;
+ __be64 reserved2;
+};
+
+/* Idata index 4: Platform Dump Descriptor */
+#define IPLPARAMS_PLATFORM_DUMP 4
+
+struct iplparams_dump {
+ __be16 flags;
+ uint8_t reserved1;
+ uint8_t policy;
+#define HYP_DUMP_POLICY_NORMAL 0x00
+ __be32 dump_id;
+ __be64 reserved2;
+ __be64 act_dump_sz;
+ __be32 max_hw_dump_sz;
+ __be32 act_hw_dump_sz;
+ __be32 max_sp_dump_sz;
+ __be32 plid;
+};
+
+/* Idata index 8: serial ports */
+#define IPLPARMS_IDATA_SERIAL 8
+
+/* An HDIF array of serial descriptions */
+struct iplparms_serial {
+ uint8_t loc_code[LOC_CODE_SIZE];
+ __be16 rsrc_id;
+ __be16 flags;
+#define PLPARMS_SERIAL_FLAGS_CALLHOME 0x8000
+} __packed;
+
+/*
+ * Chip TOD structure
+ *
+ * This is an array of 32 entries (I assume per possible chip)
+ */
+
+/* Idata index 0: Chip ID data (array) */
+#define CHIPTOD_IDATA_CHIPID 0
+
+struct chiptod_chipid {
+ __be32 chip_id;
+ __be32 flags;
+#define CHIPTOD_ID_FLAGS_PRIMARY 0x02
+#define CHIPTOD_ID_FLAGS_SECONDARY 0x01
+#define CHIPTOD_ID_FLAGS_STATUS_MASK 0x0c
+#define CHIPTOD_ID_FLAGS_STATUS_OK 0x04
+#define CHIPTOD_ID_FLAGS_STATUS_NOK 0x08
+} __packed;
+
+/* Idata index 0: Chip Initialization data */
+#define CHIPTOD_IDATA_CHIPINIT 1
+
+struct chiptod_chipinit {
+ __be32 ctrl_reg_internal;
+ __be32 tod_ctrl_reg;
+} __packed;
+
+/*
+ * MS VPD - Memory Description Tree
+ *
+ * One such structure points to the various memory arrays
+ * along with other info about the BCRs, Page Mover, XSCOM,...
+ */
+#define MSVPD_HDIF_SIG "MS VPD"
+
+/* Idata index 0: Mainstore address config */
+#define MSVPD_IDATA_MS_ADDR_CONFIG 0
+
+/* Mainstore Address Configuration */
+struct msvpd_ms_addr_config {
+ __be64 max_configured_ms_address;
+ __be64 max_possible_ms_address;
+ __be32 deprecated;
+ __be64 mirrorable_memory_starting_address;
+} __attribute__((packed));
+
+/* Idata index 1: Total configured mainstore */
+#define MSVPD_IDATA_TOTAL_CONFIG_MS 1
+
+struct msvpd_total_config_ms {
+ __be64 total_in_mb;
+};
+
+/* Idata index 2: Page mover and sync structure */
+#define MSVPD_IDATA_PMOVER_SYNCHRO 2
+
+struct msvpd_pmover_bsr_synchro {
+ __be32 flags;
+#define MSVPD_PMS_FLAG_HWLOCK_EN 0x80000000
+#define MSVPD_PMS_FLAG_PMOVER_EN 0x40000000
+#define MSVPD_PMS_FLAG_BSR_EN 0x20000000
+#define MSVPD_PMS_FLAG_XSCOMBASE_VALID 0x10000000
+ /* P7 values for BSR mode */
+#define MSVPD_PMS_FLAG_P7BSR_1M_MODE 0x00000000
+#define MSVPD_PMS_FLAG_P7BSR_2M_MODE 0x02000000
+#define MSVPD_PMS_FLAG_P7BSR_4M_MODE 0x04000000
+#define MSVPD_PMS_FLAG_P7BSR_8M_MODE 0x06000000
+ __be32 hwlocks_per_page;
+ __be64 hwlock_addr;
+ __be64 pmover_addr;
+ __be64 bsr_addr;
+ __be64 xscom_addr;
+
+};
+
+/* Idata index 3: Memory Trace Array */
+
+/* Idata index 4: UE Address Array */
+
+/* Child index 0: MS area child structure */
+#define MSVPD_CHILD_MS_AREAS 0
+
+/*
+ * CEC I/O Hub FRU
+ *
+ * This is an array of CEC Hub FRU HDIF structures
+ *
+ * Each of these has some idata pointers to generic info about the
+ * hub and a possible child pointer for a daughter card.
+ *
+ * Actual ports are in the SLCA and need to be cross referenced
+ *
+ * Note that slots meant for the addition of GX+ adapters that
+ * are currently unpopulated but support hotplug will have a
+ * minimum "placeholder" entry, which will be fully populated
+ * when the array is rebuilt during concurrent maintenance.
+ * This "placeholder" is called a "reservation".
+ *
+ * WARNING: The array rebuilt by concurrent maintenance is not
+ * guaranteed to be in the same order as the IPL array, nor is
+ * the order stable between concurrent maintenance operations.
+ *
+ * There's also a child pointer to daughter card structures but
+ * we aren't going to handle that just yet.
+ */
+#define CECHUB_FRU_HDIF_SIG "IO HUB"
+#define IOKID_FRU_HDIF_SIG "IO KID"
+
+/* Idata index 0: FRU ID data
+ *
+ * This is a generic struct spira_fru_id defined above
+ */
+#define CECHUB_FRU_ID_DATA 0
+
+/* Idata index 1: ASCII Keyword VPD */
+#define CECHUB_ASCII_KEYWORD_VPD 1
+
+/* Idata index 2: Hub FRU ID data area */
+#define CECHUB_FRU_ID_DATA_AREA 2
+
+struct cechub_hub_fru_id {
+ __be32 card_type;
+#define CECHUB_FRU_TYPE_IOHUB_RSRV 0
+#define CECHUB_FRU_TYPE_IOHUB_CARD 1
+#define CECHUB_FRU_TYPE_CPU_CARD 2
+#define CECHUB_FRU_TYPE_CEC_BKPLANE 3
+#define CECHUB_FRU_TYPE_BKPLANE_EXT 4
+ __be32 unused;
+ __be16 total_chips;
+ uint8_t flags;
+#define CECHUB_FRU_FLAG_HEADLESS 0x80 /* not connected to CPU */
+#define CECHUB_FRU_FLAG_PASSTHROUGH	0x40	/* connected to passthrough
+ port of another hub */
+ uint8_t reserved;
+ __be16 parent_hub_id; /* chip instance number of the
+ hub that contains the passthrough
+ port this one is connected to */
+ __be16 reserved2;
+} __packed;
+
+
+/* Idata index 3: IO HUB array */
+
+#define CECHUB_FRU_IO_HUBS 3
+
+/* This is an HDIF array of IO Hub structures */
+struct cechub_io_hub {
+ __be64 fmtc_address;
+ __be32 fmtc_tce_size;
+ __be16 hub_num; /* unique hub number (I/O Hub ID) */
+ uint8_t flags;
+#define CECHUB_HUB_FLAG_STATE_MASK 0xc0
+#define CECHUB_HUB_FLAG_STATE_OK 0x00
+#define CECHUB_HUB_FLAG_STATE_FAILURES 0x40
+#define CECHUB_HUB_FLAG_STATE_NOT_INST 0x80
+#define CECHUB_HUB_FLAG_STATE_UNUSABLE 0xc0
+#define CECHUB_HUB_FLAG_MASTER_HUB 0x20 /* HDAT < v9.x only */
+#define CECHUB_HUB_FLAG_GARD_MASK_VALID 0x08 /* HDAT < v9.x only */
+#define CECHUB_HUB_FLAG_SWITCH_MASK_PDT 0x04 /* HDAT < v9.x only */
+#define CECHUB_HUB_FLAG_FAB_BR0_PDT 0x02 /* HDAT < v9.x only */
+#define CECHUB_HUB_FLAG_FAB_BR1_PDT 0x01 /* HDAT < v9.x only */
+ uint8_t nr_ports; /* HDAT < v9.x only */
+ uint8_t fab_br0_pdt; /* p5ioc2 PCI-X or P8 PHB3's */
+#define CECHUB_HUB_FAB_BR0_PDT_PHB0 0x80
+#define CECHUB_HUB_FAB_BR0_PDT_PHB1 0x40
+#define CECHUB_HUB_FAB_BR0_PDT_PHB2 0x20
+#define CECHUB_HUB_FAB_BR0_PDT_PHB3 0x10
+ uint8_t fab_br1_pdt; /* p5ioc2 & p7ioc PCI-E */
+#define CECHUB_HUB_FAB_BR1_PDT_PHB0 0x80
+#define CECHUB_HUB_FAB_BR1_PDT_PHB1 0x40
+#define CECHUB_HUB_FAB_BR1_PDT_PHB2 0x20
+#define CECHUB_HUB_FAB_BR1_PDT_PHB3 0x10
+#define CECHUB_HUB_FAB_BR1_PDT_PHB4 0x08 /* p7ioc only */
+#define CECHUB_HUB_FAB_BR1_PDT_PHB5 0x04 /* p7ioc only */
+ __be16 iohub_id; /* the type of hub */
+#define CECHUB_HUB_P5IOC2 0x1061 /* from VPL1 */
+#define CECHUB_HUB_P7IOC 0x60e7 /* from VPL3 */
+#define CECHUB_HUB_MURANO 0x20ef /* Murano from spec */
+#define CECHUB_HUB_MURANO_SEGU 0x0001 /* Murano+Seguso from spec */
+#define CECHUB_HUB_VENICE_WYATT 0x0010 /* Venice+Wyatt from spec */
+ __be32 ec_level;
+ __be32 aff_dom2; /* HDAT < v9.x only */
+ __be32 aff_dom3; /* HDAT < v9.x only */
+ __be64 reserved;
+ __be32 proc_chip_id;
+
+ union {
+ /* HDAT < v9.x */
+ struct {
+ __be32 gx_index; /* GX bus index on cpu */
+ __be32 buid_ext; /* BUID Extension */
+ __be32 xscom_chip_id; /* TORRENT ONLY */
+ };
+ /* HDAT >= v9.x */
+ struct {
+ __be32 reserved1;
+ __be32 reserved2;
+ __be16 reserved3;
+ __be16 hw_topology;
+ };
+ };
+ __be32 mrid;
+ __be32 mem_map_vers;
+ union {
+ /* HDAT < v9.x */
+ struct {
+ __be64 gx_ctrl_bar0;
+ __be64 gx_ctrl_bar1;
+ __be64 gx_ctrl_bar2;
+ __be64 gx_ctrl_bar3;
+ __be64 gx_ctrl_bar4;
+ __be32 sw_mask_pdt;
+ __be16 gard_mask;
+ __be16 gx_bus_speed; /* Version 0x58 */
+ };
+
+ /* HDAT >= v9.x, HDIF version 0x6A or later */
+ struct {
+ /* 4 values per PHB, 4 PHBs */
+ __be64 phb_lane_eq[4][4];
+ };
+ };
+} __packed;
+
+/* We support structures as small as 0x68 bytes */
+#define CECHUB_IOHUB_MIN_SIZE 0x68
+
+/* Child index 0: IO Daugther Card */
+#define CECHUB_CHILD_IO_KIDS 0
+
+/*
+ * IO KID is a daughter card structure
+ */
+#define IOKID_FRU_ID_DATA 0
+#define IOKID_KW_VPD 1
+
+/*
+ * Slot Location Code Array (aka SLCA)
+ *
+ * This is a pile of location codes referenced by various other
+ * structures such as the IO Hubs for things on the CEC. Not
+ * everything in there is a physical port. The SLCA is actually
+ * a tree which represents the topology of the system.
+ *
+ * The tree works as follows: A parent has a pointer to the first
+ * child. A child has a pointer to its parent. Siblings are
+ * consecutive entries.
+ *
+ * Note: If we ever support concurrent maintenance... this is
+ * completely rebuilt, invalidating all indices, though other
+ * structures that may reference SLCA by index will be rebuilt
+ * as well.
+ *
+ * Note that a lot of that stuff is based on VPD documentation
+ * such as the identification keywords. I will list the ones
+ * I manage to figure out without the doc separately.
+ */
+#define SLCA_HDIF_SIG "SLCA "
+
+/* Idata index 0 : SLCA root pointer
+ *
+ * The SLCA array is an HDIF array of all the entries. The tree
+ * structure is based on indices inside the entries and order of
+ * the entries
+ */
+#define SLCA_IDATA_ARRAY 0
+
+/* Note: An "index" (or idx) is always an index into the SLCA array
+ * and "id" is a reference to some other object.
+ */
+struct slca_entry {
+ __be16 my_index; /* redundant, useful */
+ __be16 rsrc_id; /* formerly VPD port number */
+ uint8_t fru_id[2]; /* ASCII VPD ID */
+#define SLCA_ROOT_VPD_ID VPD_ID('V','V')
+#define SLCA_SYSTEM_VPD_ID VPD_ID('S','V')
+ __be16 parent_index; /* Parent entry index */
+ uint8_t flags;
+#define SLCA_FLAG_NON_FUNCTIONAL 0x02 /* For redundant entries */
+#define SLCA_FLAG_IMBEDDED 0x01 /* not set => pluggable */
+ uint8_t old_nr_child; /* Legacy: Nr of children */
+ __be16 child_index; /* First child index */
+ __be16 child_rsrc_id; /* Resource ID of first child */
+ uint8_t loc_code_allen; /* Alloc len of loc code */
+ uint8_t loc_code_len; /* Loc code len */
+ uint8_t loc_code[LOC_CODE_SIZE]; /* NULL terminated (thus max 79 chr) */
+ __be16 first_dup_idx; /* First redundant resource index */
+ uint8_t nr_dups; /* Number of redundant entries */
+ uint8_t reserved;
+ __be16 nr_child; /* New version */
+ uint8_t install_indic; /* Installed indicator */
+#define SLCA_INSTALL_NO_HW_PDT 1 /* No HW presence detect */
+#define SLCA_INSTALL_INSTALLED 2
+#define SLCA_INSTALL_NOT_INSTALLED 3
+ uint8_t vpd_collected;
+#define SLCA_VPD_COLLECTED 2
+#define SLCA_VPD_NOT_COLLECTED 3
+} __packed;
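+
+/* A sketch of walking the tree described above, assuming the
+ * slca_get_entry() helper from hdata/slca.c: since siblings are
+ * consecutive array entries, the children of a node are the nr_child
+ * entries starting at child_index.
+ *
+ *	const struct slca_entry *e = slca_get_entry(idx);
+ *	uint16_t i, first;
+ *
+ *	if (e) {
+ *		first = be16_to_cpu(e->child_index);
+ *		for (i = 0; i < be16_to_cpu(e->nr_child); i++) {
+ *			const struct slca_entry *c;
+ *
+ *			c = slca_get_entry(first + i);
+ *			... c->loc_code, recurse via c->child_index ...
+ *		}
+ *	}
+ */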
+
+/*
+ * System VPD
+ */
+#define SYSVPD_HDIF_SIG "SYSVPD"
+
+/* Idata index 0 : FRU ID Data */
+#define SYSVPD_IDATA_FRU_ID 0
+
+/* Idata index 1 : Keyword VPD */
+#define SYSVPD_IDATA_KW_VPD 1
+
+/* Idata index 2 : Operational status */
+#define SYSVPD_IDATA_OP_STATUS 2
+
+/*
+ * FRU keyword VPD structure
+ */
+#define FRUVPD_HDIF_SIG "FRUVPD"
+
+/* Idata index 0 : FRU ID Data */
+#define FRUVPD_IDATA_FRU_ID 0
+
+/* Idata index 1 : Keyword VPD */
+#define FRUVPD_IDATA_KW_VPD 1
+
+/* Idata index 2 : Operational status */
+#define FRUVPD_IDATA_OP_STATUS 2
+
+
+/*
+ * SPPACA structure. The SPIRA contains an array of these, one
+ * per processor thread
+ */
+#define PACA_HDIF_SIG "SPPACA"
+
+/* Idata index 0 : FRU ID Data */
+#define SPPACA_IDATA_FRU_ID 0
+
+/* Idata index 1 : Keyword VPD */
+#define SPPACA_IDATA_KW_VPD 1
+
+/* Idata index 2 : CPU ID data area */
+#define SPPACA_IDATA_CPU_ID 2
+
+struct sppaca_cpu_id {
+ __be32 pir;
+ __be32 fru_id;
+ __be32 hardware_proc_id;
+#define CPU_ID_VERIFY_MASK 0xC0000000
+#define CPU_ID_VERIFY_SHIFT 30
+#define CPU_ID_VERIFY_USABLE_NO_FAILURES 0
+#define CPU_ID_VERIFY_USABLE_FAILURES 1
+#define CPU_ID_VERIFY_NOT_INSTALLED 2
+#define CPU_ID_VERIFY_UNUSABLE 3
+#define CPU_ID_SECONDARY_THREAD 0x20000000
+#define CPU_ID_PACA_RESERVED 0x10000000
+#define CPU_ID_NUM_SECONDARY_THREAD_MASK 0x00FF0000
+#define CPU_ID_NUM_SECONDARY_THREAD_SHIFT 16
+ __be32 verify_exists_flags;
+ __be32 chip_ec_level;
+ __be32 processor_chip_id;
+ __be32 logical_processor_id;
+ /* This is the resource number, too. */
+ __be32 process_interrupt_line;
+ __be32 reserved1;
+ __be32 hardware_module_id;
+ __be64 ibase;
+ __be32 deprecated1;
+ __be32 physical_thread_id;
+ __be32 deprecated2;
+ __be32 ccm_node_id;
+	/* These fields are not always present, check struct size */
+#define SPIRA_CPU_ID_MIN_SIZE 0x40
+ __be32 hw_card_id;
+ __be32 internal_drawer_node_id;
+ __be32 drawer_book_octant_blade_id;
+ __be32 memory_interleaving_scope;
+ __be32 lco_target;
+} __packed;
+
+/* Idata index 3 : Timebase data */
+#define SPPACA_IDATA_TIMEBASE 3
+
+struct sppaca_cpu_timebase {
+ __be32 cycle_time;
+ __be32 time_base;
+ __be32 actual_clock_speed;
+ __be32 memory_bus_frequency;
+} __packed;
+
+/* Idata index 4 : Cache size structure */
+#define SPPACA_IDATA_CACHE_SIZE 4
+
+struct sppaca_cpu_cache {
+ __be32 icache_size_kb;
+ __be32 icache_line_size;
+ __be32 l1_dcache_size_kb;
+ __be32 l1_dcache_line_size;
+ __be32 l2_dcache_size_kb;
+ __be32 l2_line_size;
+ __be32 l3_dcache_size_kb;
+ __be32 l3_line_size;
+ __be32 dcache_block_size;
+ __be32 icache_block_size;
+ __be32 dcache_assoc_sets;
+ __be32 icache_assoc_sets;
+ __be32 dtlb_entries;
+ __be32 dtlb_assoc_sets;
+ __be32 itlb_entries;
+ __be32 itlb_assoc_sets;
+ __be32 reservation_size;
+ __be32 l2_cache_assoc_sets;
+ __be32 l35_dcache_size_kb;
+ __be32 l35_cache_line_size;
+};
+
+/* Idata index 6 : CPU Attributes */
+#define SPPACA_IDATA_CPU_ATTR 6
+
+#define sppaca_cpu_attr sppcia_cpu_attr
+
+/*
+ * SPPCIA structure. The SPIRA contains an array of these, one
+ * per processor core
+ */
+#define SPPCIA_HDIF_SIG "SPPCIA"
+
+/* Idata index 0 : Core unique data */
+#define SPPCIA_IDATA_CORE_UNIQUE 0
+
+/* NOTE: This is the same layout as "struct sppaca_cpu_id",
+ * with essentially some fields removed and a reserved
+ * field added
+ */
+struct sppcia_core_unique {
+ __be32 reserved;
+ __be32 proc_fru_id;
+ __be32 hw_proc_id;
+ __be32 verif_exist_flags; /* Same as PACA */
+ __be32 chip_ec_level;
+ __be32 proc_chip_id;
+ __be32 reserved2;
+ __be32 reserved3;
+ __be32 reserved4;
+ __be32 hw_module_id;
+ __be64 reserved5;
+ __be32 reserved6;
+ __be32 reserved7;
+ __be32 reserved8;
+ __be32 ccm_node_id;
+ __be32 hw_card_id;
+ __be32 internal_drawer_node_id;
+ __be32 drawer_book_octant_blade_id;
+ __be32 memory_interleaving_scope;
+ __be32 lco_target;
+ __be32 reserved9;
+} __packed;
+
+/* Idata index 1 : CPU Time base structure */
+#define SPPCIA_IDATA_TIMEBASE 1
+
+#define sppcia_cpu_timebase sppaca_cpu_timebase
+
+/* Idata index 2 : CPU Cache Size Structure */
+#define SPPCIA_IDATA_CPU_CACHE 2
+
+#define sppcia_cpu_cache sppaca_cpu_cache
+
+/* Idata index 3 : Thread Array Data
+ *
+ * HDIF array of struct sppcia_cpu_thread entries, one per thread
+ */
+#define SPPCIA_IDATA_THREAD_ARRAY 3
+
+struct sppcia_cpu_thread {
+ __be32 proc_int_line;
+ __be32 phys_thread_id;
+ __be64 ibase;
+ __be32 pir;
+} __packed;
+
+/* Idata index 4 : CPU Attributes */
+#define SPPCIA_IDATA_CPU_ATTR 4
+
+struct sppcia_cpu_attr {
+#define CPU_ATTR_UNIFIED_PL1 0x80
+#define CPU_ATTR_SPLIT_TLB 0x40
+#define CPU_ATTR_TLBIA 0x20
+#define CPU_ATTR_PERF_MONITOR 0x10
+#define CPU_ATTR_EXTERN_CONT 0x02
+ __be32 attr;
+} __packed;
+
+/*
+ * Processor Chip Related Data. The SPIRA contains an array of these, one
+ * per chip
+ */
+#define SPPCRD_HDIF_SIG "SPPCRD"
+
+/* Idata index 0 : Chip info */
+#define SPPCRD_IDATA_CHIP_INFO 0
+
+struct sppcrd_chip_info {
+ __be32 proc_chip_id;
+ __be32 verif_exist_flags;
+#define CHIP_VERIFY_MASK 0xC0000000
+#define CHIP_VERIFY_SHIFT 30
+#define CHIP_VERIFY_USABLE_NO_FAILURES 0
+#define CHIP_VERIFY_USABLE_FAILURES 1
+#define CHIP_VERIFY_NOT_INSTALLED 2
+#define CHIP_VERIFY_UNUSABLE 3
+ __be32 nx_state;
+ __be32 pore_state;
+ __be32 xscom_id;
+ /* Version 0xA */
+ __be32 reserved;
+ __be32 dbob_id;
+ __be32 occ_state;
+} __packed;
+
+/* Idata index 1 : Chip TOD */
+#define SPPCRD_IDATA_CHIP_TOD 1
+
+struct sppcrd_chip_tod {
+ __be32 flags;
+ /* CHIPTOD_ID_... values */
+ __be32 ctrl_reg_internal;
+ __be32 tod_ctrl_reg;
+} __packed;
+
+/* Idata index 2 : FRU ID */
+#define SPPCRD_IDATA_FRU_ID 2
+
+/* Idata index 3 : ASCII Keyword data */
+#define SPPCRD_IDATA_KW_VPD 3
+
+/* Idata index 4 : Module VPD */
+#define SPPCRD_IDATA_MODULE_VPD 4
+
+
+/*
+ * Host Services Data.
+ */
+#define HSERV_HDIF_SIG "HOSTSR"
+
+/* Idata index 0 : System attribute data */
+#define HSERV_IDATA_SYS_ATTR 0
+
+static inline const char *cpu_state(u32 flags)
+{
+ switch ((flags & CPU_ID_VERIFY_MASK) >> CPU_ID_VERIFY_SHIFT) {
+ case CPU_ID_VERIFY_USABLE_NO_FAILURES:
+ return "OK";
+ case CPU_ID_VERIFY_USABLE_FAILURES:
+ return "FAILURES";
+ case CPU_ID_VERIFY_NOT_INSTALLED:
+ return "NOT-INSTALLED";
+ case CPU_ID_VERIFY_UNUSABLE:
+ return "UNUSABLE";
+ }
+ return "**UNKNOWN**";
+}
+#endif /* __SPIRA_H */
diff --git a/hdata/test/Makefile.check b/hdata/test/Makefile.check
new file mode 100644
index 0000000..e03a021
--- /dev/null
+++ b/hdata/test/Makefile.check
@@ -0,0 +1,18 @@
+# -*-Makefile-*-
+
+check: hdata-check
+
+# Add some test ntuples for open source version...
+hdata-check: hdata/test/hdata_to_dt
+# $(VALGRIND) hdata/test/hdata_to_dt -q hdata/test/spira.bin hdata/test/ntuples.bin
+
+hdata/test/stubs.o: hdata/test/stubs.c
+ $(HOSTCC) $(HOSTCFLAGS) -g -c -o $@ $<
+
+hdata/test/hdata_to_dt: hdata/test/hdata_to_dt.c hdata/test/stubs.o
+ $(HOSTCC) $(HOSTCFLAGS) -O0 -g -I hdata -I include -I . -I libfdt -o $@ $< hdata/test/stubs.o
+
+clean: hdata-test-clean
+
+hdata-test-clean:
+ $(RM) hdata/test/*.o hdata/test/hdata_to_dt
diff --git a/hdata/test/hdata_to_dt.c b/hdata/test/hdata_to_dt.c
new file mode 100644
index 0000000..4215740
--- /dev/null
+++ b/hdata/test/hdata_to_dt.c
@@ -0,0 +1,215 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Given a hdata dump, output the device tree. */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+
+#include <interrupts.h>
+
+/* Our actual map. */
+static void *spira_heap;
+static size_t spira_heap_size;
+static uint64_t base_addr;
+
+/* Override ntuple_addr. */
+#define ntuple_addr ntuple_addr
+struct spira_ntuple;
+static void *ntuple_addr(const struct spira_ntuple *n);
+
+/* Stuff which core expects. */
+#define __this_cpu ((struct cpu_thread *)NULL)
+#define zalloc(expr) calloc(1, (expr))
+
+/* Don't include processor-specific stuff. */
+#define __PROCESSOR_H
+#define PVR_TYPE(_pvr) _pvr
+
+/* PVR definitions */
+#define PVR_TYPE_P7 0x003f
+#define PVR_TYPE_P7P 0x004a
+#define PVR_TYPE_P8E 0x004b
+#define PVR_TYPE_P8 0x004d
+
+#define SPR_PVR 0x11f /* RO: Processor version register */
+
+#define __CPU_H
+struct cpu_thread {
+ uint32_t pir;
+};
+
+struct cpu_thread __boot_cpu, *boot_cpu = &__boot_cpu;
+static unsigned long fake_pvr_type = PVR_TYPE_P7;
+
+static inline unsigned long mfspr(unsigned int spr)
+{
+ assert(spr == SPR_PVR);
+ return fake_pvr_type;
+}
+
+struct dt_node *add_ics_node(void)
+{
+ return NULL;
+}
+
+#include <config.h>
+#include <bitutils.h>
+
+/* Your pointers won't be correct, that's OK. */
+#define spira_check_ptr(ptr, file, line) ((ptr) != NULL)
+
+#include "../cpu-common.c"
+#include "../fsp.c"
+#include "../hdif.c"
+#include "../iohub.c"
+#include "../memory.c"
+#include "../paca.c"
+#include "../pcia.c"
+#include "../spira.c"
+#include "../vpd.c"
+#include "../vpd-common.c"
+#include "../slca.c"
+#include "../hostservices.c"
+#include "../../core/vpd.c"
+#include "../../core/device.c"
+#include "../../core/chip.c"
+
+#include <err.h>
+
+char __rodata_start[1], __rodata_end[1];
+
+enum proc_gen proc_gen = proc_gen_p7;
+
+static void *ntuple_addr(const struct spira_ntuple *n)
+{
+ uint64_t addr = be64_to_cpu(n->addr);
+ if (n->addr == 0)
+ return NULL;
+ assert(addr >= base_addr);
+ assert(addr < base_addr + spira_heap_size);
+ return spira_heap + ((unsigned long)addr - base_addr);
+}
+
+static void indent_num(unsigned indent)
+{
+ unsigned int i;
+
+ for (i = 0; i < indent; i++)
+ putc(' ', stdout);
+}
+
+static void dump_val(const void *prop, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ printf("%02x ", ((unsigned char *)prop)[i]);
+}
+
+/* Make sure valgrind knows these are undefined bytes. */
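+/* (memcpy() from freshly malloc()ed memory propagates the undefined state.) */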
+static void undefined_bytes(void *p, size_t len)
+{
+ void *undef = malloc(len);
+ memcpy(p, undef, len);
+ free(undef);
+}
+
+static void dump_dt(const struct dt_node *root, unsigned indent)
+{
+ const struct dt_node *i;
+ const struct dt_property *p;
+
+ list_for_each(&root->properties, p, list) {
+ indent_num(indent);
+ printf("prop: %s size: %zu val: ", p->name, p->len);
+ dump_val(p->prop, p->len);
+ printf("\n");
+ }
+
+ list_for_each(&root->children, i, list)
+ dump_dt(i, indent + 2);
+}
+
+int main(int argc, char *argv[])
+{
+ int fd, r;
+ bool verbose = false, quiet = false;
+
+ while (argv[1]) {
+ if (strcmp(argv[1], "-v") == 0) {
+ verbose = true;
+ argv++;
+ argc--;
+ } else if (strcmp(argv[1], "-q") == 0) {
+ quiet = true;
+ argv++;
+ argc--;
+ } else
+ break;
+ }
+
+ if (argc != 3)
+ errx(1, "Usage: hdata_to_dt [-v|-q] <spira-dump> <heap-dump>");
+
+ /* Copy in spira dump (assumes little has changed!). */
+ fd = open(argv[1], O_RDONLY);
+ if (fd < 0)
+ err(1, "opening %s", argv[1]);
+ r = read(fd, &spira, sizeof(spira));
+ if (r < 0 || (size_t)r < sizeof(spira.hdr))
+ err(1, "reading %s gave %i", argv[1], r);
+ if (verbose)
+ printf("verbose: read spira %u bytes\n", r);
+ close(fd);
+
+ undefined_bytes((void *)&spira + r, sizeof(spira) - r);
+
+ base_addr = be64_to_cpu(spira.ntuples.heap.addr);
+ if (!base_addr)
+ errx(1, "Invalid base addr");
+ if (verbose)
+ printf("verbose: map.base_addr = %llx\n", (long long)base_addr);
+
+ fd = open(argv[2], O_RDONLY);
+ if (fd < 0)
+ err(1, "opening %s", argv[2]);
+ spira_heap_size = lseek(fd, 0, SEEK_END);
+ spira_heap = mmap(NULL, spira_heap_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (spira_heap == MAP_FAILED)
+ err(1, "mmaping %s", argv[3]);
+ if (verbose)
+ printf("verbose: mapped %zu at %p\n",
+ spira_heap_size, spira_heap);
+ close(fd);
+
+ if (quiet) {
+ fclose(stdout);
+ fclose(stderr);
+ }
+
+ parse_hdat(false, 0);
+
+ if (!quiet)
+ dump_dt(dt_root, 0);
+
+ dt_free(dt_root);
+ return 0;
+}
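+
+/*
+ * Typical invocation, matching the (commented-out) check rule in
+ * hdata/test/Makefile.check:
+ *
+ *   hdata/test/hdata_to_dt -q hdata/test/spira.bin hdata/test/ntuples.bin
+ */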
diff --git a/hdata/test/stubs.c b/hdata/test/stubs.c
new file mode 100644
index 0000000..8e1bd1b
--- /dev/null
+++ b/hdata/test/stubs.c
@@ -0,0 +1,47 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Add any stub functions required for linking here. */
+#include <stdlib.h>
+
+static void stub_function(void)
+{
+ abort();
+}
+
+#define STUB(fnname) \
+ void fnname(void) __attribute__((weak, alias ("stub_function")))
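+/* Each STUB() defines the named symbol as a weak alias of stub_function(),
+ * so the test binary links, and any stub that actually gets called aborts. */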
+
+STUB(fdt_begin_node);
+STUB(fdt_property);
+STUB(fdt_end_node);
+STUB(fdt_create);
+STUB(fdt_add_reservemap_entry);
+STUB(fdt_finish_reservemap);
+STUB(fdt_strerror);
+STUB(fdt_check_header);
+STUB(_fdt_check_node_offset);
+STUB(fdt_next_tag);
+STUB(fdt_string);
+STUB(fdt_get_name);
+STUB(dt_first);
+STUB(dt_next);
+STUB(dt_has_node_property);
+STUB(dt_get_address);
+STUB(op_display);
+STUB(fsp_fetch_data);
+STUB(get_ics_phandle);
+STUB(get_psi_interrupt);
+STUB(fsp_adjust_lid_side);
diff --git a/hdata/vpd-common.c b/hdata/vpd-common.c
new file mode 100644
index 0000000..eecda7e
--- /dev/null
+++ b/hdata/vpd-common.c
@@ -0,0 +1,38 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <vpd.h>
+#include <string.h>
+#include <device.h>
+
+static const struct machine_info machine_table[] = {
+ {"8247-21L", "IBM Power System S812L"},
+ {"8247-22L", "IBM Power System S822L"},
+ {"8247-24L", "IBM Power System S824L"},
+ {"8286-41A", "IBM Power System S814"},
+ {"8286-22A", "IBM Power System S822"},
+ {"8286-42A", "IBM Power System S824"},
+};
+
+const struct machine_info *machine_info_lookup(char *mtm)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(machine_table); i++)
+ if (!strcmp(machine_table[i].mtm, mtm))
+ return &machine_table[i];
+ return NULL;
+}
diff --git a/hdata/vpd.c b/hdata/vpd.c
new file mode 100644
index 0000000..e568a06
--- /dev/null
+++ b/hdata/vpd.c
@@ -0,0 +1,851 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <vpd.h>
+#include <string.h>
+#include "spira.h"
+#include "hdata.h"
+#include <device.h>
+#include "hdata.h"
+
+struct card_info {
+ const char *ccin; /* Customer card identification number */
+ const char *description;
+};
+
+static const struct card_info card_table[] = {
+ {"2B06", "System planar 2S4U"},
+ {"2B07", "System planar 1S4U"},
+ {"2B2E", "System planar 2S2U"},
+ {"2B2F", "System planar 1S2U"},
+ {"2CD4", "System planar 2S4U"},
+ {"2CD5", "System planar 1S4U"},
+ {"2CD6", "System planar 2S2U"},
+ {"2CD7", "System planar 1S2U"},
+ {"2CD7", "System planar 1S2U"},
+ {"2B09", "Base JBOD, RAID and Backplane HD"},
+ {"57D7", "Split JBOD, RAID Card"},
+ {"2B0B", "Native I/O Card"},
+
+ /* Anchor cards */
+ {"52FE", "System Anchor Card - IBM Power 824"},
+ {"52F2", "System Anchor Card - IBM Power 814"},
+ {"52F5", "System Anchor Card - IBM Power 822"},
+ {"561A", "System Anchor Card - IBM Power 824L"},
+ {"524D", "System Anchor Card - IBM Power 822L"},
+ {"560F", "System Anchor Card - IBM Power 812L"},
+ {"561C", "System Anchor Card - DS8870"},
+
+ /* Memory DIMMs */
+ {"31E0", "16GB CDIMM"},
+ {"31E8", "16GB CDIMM"},
+ {"31E1", "32GB CDIMM"},
+ {"31E9", "32GB CDIMM"},
+ {"31E2", "64GB CDIMM"},
+ {"31EA", "64GB CDIMM"},
+
+ /* Power supplies */
+ {"2B1D", "Power Supply 900W AC"},
+ {"2B1E", "Power Supply 1400W AC"},
+ {"2B75", "Power Supply 1400W HVDC"},
+
+ /* Fans */
+ {"2B1F", "Fan 4U (A1, A2, A3, A4)"},
+ {"2B29", "Fan 2U (A1, A2, A3, A4, A5, A6)"},
+
+ /* Other cards */
+};
+
+static const struct card_info *card_info_lookup(char *ccin)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(card_table); i++)
+ if (!strcmp(card_table[i].ccin, ccin))
+ return &card_table[i];
+ return NULL;
+}
+
+static void vpd_vini_parse(struct dt_node *node,
+ const void *fruvpd, unsigned int fruvpd_sz)
+{
+ const void *kw;
+ char *str;
+ uint8_t kwsz;
+ const struct card_info *cinfo;
+
+ /* FRU Stocking Part Number */
+ kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "FN", &kwsz);
+ if (kw) {
+ str = zalloc(kwsz + 1);
+ if (!str)
+ goto no_memory;
+ memcpy(str, kw, kwsz);
+ dt_add_property_string(node, "fru-number", str);
+ free(str);
+ }
+
+ /* Serial Number */
+ kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "SN", &kwsz);
+ if (kw) {
+ str = zalloc(kwsz + 1);
+ if (!str)
+ goto no_memory;
+ memcpy(str, kw, kwsz);
+ dt_add_property_string(node, "serial-number", str);
+ free(str);
+ }
+
+ /* Part Number */
+ kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "PN", &kwsz);
+ if (kw) {
+ str = zalloc(kwsz + 1);
+ if (!str)
+ goto no_memory;
+ memcpy(str, kw, kwsz);
+ dt_add_property_string(node, "part-number", str);
+ free(str);
+ }
+
+ /* Customer Card Identification Number (CCIN) */
+ kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "CC", &kwsz);
+ if (kw) {
+ str = zalloc(kwsz + 1);
+ if (!str)
+ goto no_memory;
+ memcpy(str, kw, kwsz);
+ dt_add_property_string(node, "ccin", str);
+ cinfo = card_info_lookup(str);
+ if (cinfo)
+ dt_add_property_string(node,
+ "description", cinfo->description);
+ free(str);
+ }
+ return;
+no_memory:
+ prerror("VPD: memory allocation failure in VINI parsing\n");
+}
+
+static const char *vpd_map_name(const char *vpd_name)
+{
+ /* vpd_name is a 2 char array */
+ switch (vpd_name[0]) {
+ case 'A':
+ switch (vpd_name[1]) {
+ case 'A':
+ return "ac-power-supply";
+ case 'M':
+ return "air-mover";
+ case 'V':
+ return "anchor-card";
+ }
+ break;
+ case 'B':
+ switch (vpd_name[1]) {
+ case 'A':
+ return "bus-adapter-card";
+ case 'C':
+ return "battery-charger";
+ case 'D':
+ return "bus-daughter-card";
+ case 'E':
+ return "bus-expansion-card";
+ case 'P':
+ return "backplane";
+ case 'R':
+ return "backplane-riser";
+ case 'X':
+ return "backplane-extender";
+ }
+ break;
+ case 'C':
+ switch (vpd_name[1]) {
+ case 'A':
+ return "calgary-bridge";
+ case 'B':
+ return "infiniband-connector";
+ case 'C':
+ return "clock-card";
+ case 'D':
+ return "card-connector";
+ case 'E':
+ return "ethernet-connector";
+ case 'L':
+ return "calgary-phb";
+ case 'I':
+ return "capacity-card";
+ case 'O':
+ return "sma-connector";
+ case 'P':
+ return "processor-capacity-card";
+ case 'R':
+ return "rio-connector";
+ case 'S':
+ return "serial-connector";
+ case 'U':
+ return "usb-connector";
+ }
+ break;
+ case 'D':
+ switch (vpd_name[1]) {
+ case 'B':
+ return "dasd-backplane";
+ case 'C':
+ return "drawer-card";
+ case 'E':
+ return "drawer-extension";
+ case 'I':
+ return "drawer-interposer";
+ case 'L':
+ return "p7ih-dlink-connector";
+ case 'T':
+ return "legacy-pci-card";
+ case 'V':
+ return "media-drawer-led";
+ }
+ break;
+ case 'E':
+ switch (vpd_name[1]) {
+ case 'I':
+ return "enclosure-led";
+ case 'F':
+ return "enclosure-fault-led";
+ case 'S':
+ return "embedded-sas";
+ case 'T':
+ return "ethernet-riser";
+ case 'V':
+ return "enclosure";
+ }
+ break;
+ case 'F':
+ switch (vpd_name[1]) {
+ case 'M':
+ return "frame";
+ }
+ break;
+ case 'H':
+ switch (vpd_name[1]) {
+ case 'B':
+ return "host-rio-pci-card";
+ case 'D':
+ return "high-speed-card";
+ case 'M':
+ return "hmc-connector";
+ }
+ break;
+ case 'I':
+ switch (vpd_name[1]) {
+ case 'B':
+ return "io-backplane";
+ case 'C':
+ return "io-card";
+ case 'D':
+ return "ide-connector";
+ case 'I':
+ return "io-drawer-led";
+ case 'P':
+ return "interplane-card";
+ case 'S':
+ return "smp-vbus-cable";
+ case 'T':
+ return "enclosure-cable";
+ case 'V':
+ return "io-enclosure";
+ }
+ break;
+ case 'K':
+ switch (vpd_name[1]) {
+ case 'V':
+ return "keyboard-led";
+ }
+ break;
+ case 'L':
+ switch (vpd_name[1]) {
+ case '2':
+ return "l2-cache-module";
+ case '3':
+ return "l3-cache-module";
+ case 'C':
+ return "squadrons-light-connector";
+ case 'R':
+ return "p7ih-connector";
+ case 'O':
+ return "system-locate-led";
+ case 'T':
+ return "squadrons-light-strip";
+ }
+ break;
+ case 'M':
+ switch (vpd_name[1]) {
+ case 'B':
+ return "media-backplane";
+ case 'E':
+ return "map-extension";
+ case 'M':
+ return "mip-meter";
+ case 'S':
+ return "ms-dimm";
+ }
+ break;
+ case 'N':
+ switch (vpd_name[1]) {
+ case 'B':
+ return "nvram-battery";
+ case 'C':
+ return "sp-node-controller";
+ case 'D':
+ return "numa-dimm";
+ }
+ break;
+ case 'O':
+ switch (vpd_name[1]) {
+ case 'D':
+ return "cuod-card";
+ case 'P':
+ return "op-panel";
+ case 'S':
+ return "oscillator";
+ }
+ break;
+ case 'P':
+ switch (vpd_name[1]) {
+ case '2':
+ return "ioc";
+ case '5':
+ return "ioc-bridge";
+ case 'B':
+ return "io-drawer-backplane";
+ case 'C':
+ return "power-capacitor";
+ case 'D':
+ return "processor-card";
+ case 'F':
+ return "processor";
+ case 'I':
+ return "ioc-phb";
+ case 'O':
+ return "spcn";
+ case 'N':
+ return "spcn-connector";
+ case 'R':
+ return "pci-riser-card";
+ case 'S':
+ return "power-supply";
+ case 'T':
+ return "pass-through-card";
+ case 'X':
+ return "psc-sync-card";
+ case 'W':
+ return "power-connector";
+ }
+ break;
+ case 'R':
+ switch (vpd_name[1]) {
+ case 'G':
+ return "regulator";
+ case 'I':
+ return "riser";
+ case 'K':
+ return "rack-indicator";
+ case 'W':
+ return "riscwatch-connector";
+ }
+ break;
+ case 'S':
+ switch (vpd_name[1]) {
+ case 'A':
+ return "sys-attn-led";
+ case 'B':
+ return "backup-sysvpd";
+ case 'C':
+ return "scsi-connector";
+ case 'D':
+ return "sas-connector";
+ case 'I':
+ return "scsi-ide-converter";
+ case 'L':
+ return "phb-slot";
+ case 'P':
+ return "service-processor";
+ case 'R':
+ return "service-card";
+ case 'S':
+ return "soft-switch";
+ case 'V':
+ return "system-vpd";
+ case 'Y':
+ return "legacy-sysvpd";
+ }
+ break;
+ case 'T':
+ switch (vpd_name[1]) {
+ case 'D':
+ return "tod-clock";
+ case 'I':
+ return "torrent-pcie-phb";
+ case 'L':
+ return "torrent-riser";
+ case 'M':
+ return "thermal-sensor";
+ case 'P':
+ return "tpmd-adapter";
+ case 'R':
+ return "torrent-bridge";
+ }
+ break;
+ case 'V':
+ switch (vpd_name[1]) {
+ case 'V':
+ return "root-node-vpd";
+ }
+ break;
+ case 'W':
+ switch (vpd_name[1]) {
+ case 'D':
+ return "water_device";
+ }
+ break;
+ }
+ return "Unknown";
+}
+
+static bool valid_child_entry(const struct slca_entry *entry)
+{
+ if (!entry)
+ return false;
+
+ /*
+ * Skip entries with independent ntuple FRUVPD/MSVPD, etc.,
+ * representations, since they have a unique PN, FN, SN, et al.
+ * We add details for those devices via the ntuple walk.
+ */
+ switch (entry->fru_id[0]) {
+ case 'A':
+ switch (entry->fru_id[1]) {
+ case 'V': /* AV */
+ return false;
+ }
+ break;
+ case 'B':
+ switch (entry->fru_id[1]) {
+ case 'P': /* BP */
+ case 'X': /* BX */
+ return false;
+ }
+ break;
+ case 'C':
+ switch (entry->fru_id[1]) {
+ case 'C': /* CC */
+ return false;
+ }
+ break;
+ case 'D':
+ switch (entry->fru_id[1]) {
+ case 'B': /* DB */
+ return false;
+ }
+ break;
+ case 'E':
+ switch (entry->fru_id[1]) {
+ case 'V': /* EV */
+ return false;
+ }
+ break;
+ case 'M':
+ switch (entry->fru_id[1]) {
+ case 'S': /* MS */
+ return false;
+ }
+ break;
+ case 'O':
+ switch (entry->fru_id[1]) {
+ case 'P': /* OP */
+ return false;
+ }
+ break;
+ case 'R':
+ switch (entry->fru_id[1]) {
+ case 'I': /* RI */
+ return false;
+ }
+ break;
+ case 'P':
+ switch (entry->fru_id[1]) {
+ case '2': /* P2 */
+ case '5': /* P5 */
+ case 'F': /* PF */
+ return false;
+ }
+ break;
+ case 'S':
+ switch (entry->fru_id[1]) {
+ case 'P': /* SP */
+ return false;
+ }
+ break;
+ case 'T':
+ switch (entry->fru_id[1]) {
+ case 'P': /* TP */
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if ((entry->install_indic == SLCA_INSTALL_INSTALLED) &&
+ (entry->vpd_collected == SLCA_VPD_COLLECTED))
+ return true;
+
+ return false;
+}
+
+static void vpd_add_children(struct dt_node *parent, uint16_t slca_index)
+{
+ const struct slca_entry *s_entry, *child;
+ uint16_t current_child_index, max_index;
+
+ s_entry = slca_get_entry(slca_index);
+ if (!s_entry || (s_entry->nr_child == 0))
+ return;
+
+ /*
+ * This slca_entry has children. Parse the children array
+ * and add nodes for valid entries.
+ *
+ * A child entry is valid if all of the following criteria are met:
+ * a. SLCA_INSTALL_INSTALLED is set in s_entry->install_indic
+ * b. SLCA_VPD_COLLECTED is set in s_entry->vpd_collected
+ * c. The SLCA is not a duplicate entry.
+ */
+
+ /* current_child_index tracks where we are right now in the array */
+ current_child_index = be16_to_cpu(s_entry->child_index);
+
+ /* max_index tracks how far down the array we must traverse */
+ max_index = be16_to_cpu(s_entry->child_index)
+ + be16_to_cpu(s_entry->nr_child);
+
+ while (current_child_index < max_index) {
+ child = slca_get_entry(current_child_index);
+ if (!child)
+ return;
+
+ if (valid_child_entry(child)) {
+ const char *name;
+ uint64_t addr;
+ struct dt_node *node;
+
+ /* create new node, add location code */
+ name = vpd_map_name(child->fru_id);
+ addr = (uint64_t)be16_to_cpu(child->rsrc_id);
+ node = dt_new_addr(parent, name, addr);
+ if (!node) {
+ prerror("VPD: Creating node at %s@%llx failed\n",
+ name, addr);
+ return;
+ }
+ slca_vpd_add_loc_code(node, be16_to_cpu(child->my_index));
+
+ /* Add child FRU type */
+ dt_add_property(node, "fru-type", child->fru_id, 2);
+
+ /* recursively add children */
+ vpd_add_children(node, be16_to_cpu(child->my_index));
+ }
+
+ /* Skip dups -- currently we presume dups are contiguous */
+ if (child->nr_dups > 0)
+ current_child_index += child->nr_dups;
+ current_child_index++;
+ }
+ return;
+}
+
+struct dt_node *dt_add_vpd_node(const struct HDIF_common_hdr *hdr,
+ int indx_fru, int indx_vpd)
+{
+ const void *fruvpd;
+ unsigned int fruvpd_sz;
+ unsigned int fru_id_sz;
+ uint64_t addr;
+ struct dt_node *dt_vpd;
+ struct dt_node *node;
+ const struct spira_fru_id *fru_id;
+ const struct slca_entry *s_entry;
+ const char *vpd_name;
+ const char *name;
+ int len;
+ char *lname;
+
+ fru_id = HDIF_get_idata(hdr, indx_fru, &fru_id_sz);
+ if (!fru_id)
+ return NULL;
+
+ s_entry = slca_get_entry(be16_to_cpu(fru_id->slca_index));
+ if (valid_child_entry(s_entry)) /* Don't populate child VPD here */
+ return NULL;
+
+ fruvpd = HDIF_get_idata(hdr, indx_vpd, &fruvpd_sz);
+ if (!CHECK_SPPTR(fruvpd))
+ return NULL;
+
+ vpd_name = slca_get_vpd_name(be16_to_cpu(fru_id->slca_index));
+ if (!vpd_name) {
+ prerror("VPD: VPD name at index %d couldn't be found\n",
+ be16_to_cpu(fru_id->slca_index));
+ return NULL;
+ }
+
+ dt_vpd = dt_find_by_path(dt_root, "/vpd");
+ if (!dt_vpd)
+ return NULL;
+
+ /* Get node name */
+ name = vpd_map_name(vpd_name);
+ addr = (uint64_t)be16_to_cpu(fru_id->rsrc_id);
+ len = strlen(name) + STR_MAX_CHARS(addr) + 2;
+ lname = zalloc(len);
+ if (!lname) {
+ prerror("VPD: Failed to allocate memory\n");
+ return NULL;
+ }
+ snprintf(lname, len, "%s@%llx", name, (long long)addr);
+
+ /*
+ * FRU can be a child of some other FRU. Make sure
+ * we have not added this node already.
+ */
+ node = dt_find_by_path(dt_vpd, lname);
+ if (node) {
+ free(lname);
+ return NULL;
+ }
+
+ node = dt_new(dt_vpd, lname);
+ if (!node) {
+ free(lname);
+ return NULL;
+ }
+
+ /* Parse VPD fields */
+ dt_add_property(node, "ibm,vpd", fruvpd, fruvpd_sz);
+ vpd_vini_parse(node, fruvpd, fruvpd_sz);
+
+ /* Location code */
+ slca_vpd_add_loc_code(node, be16_to_cpu(fru_id->slca_index));
+ /* Add FRU label */
+ dt_add_property(node, "fru-type", vpd_name, 2);
+ vpd_add_children(node, be16_to_cpu(fru_id->slca_index));
+
+ free(lname);
+ return node;
+}
+
+static void sysvpd_parse(void)
+{
+ const char *model;
+ const char *system_id;
+ const char *brand;
+ char *str;
+ uint8_t sz;
+ const void *sysvpd;
+ unsigned int sysvpd_sz;
+ unsigned int fru_id_sz;
+ struct dt_node *dt_vpd;
+ const struct spira_fru_id *fru_id;
+ struct HDIF_common_hdr *sysvpd_hdr;
+ const struct machine_info *mi;
+
+ sysvpd_hdr = get_hdif(&spira.ntuples.system_vpd, SYSVPD_HDIF_SIG);
+ if (!sysvpd_hdr)
+ goto no_sysvpd;
+
+ fru_id = HDIF_get_idata(sysvpd_hdr, SYSVPD_IDATA_FRU_ID, &fru_id_sz);
+ if (!fru_id)
+ goto no_sysvpd;
+
+ sysvpd = HDIF_get_idata(sysvpd_hdr, SYSVPD_IDATA_KW_VPD, &sysvpd_sz);
+ if (!CHECK_SPPTR(sysvpd))
+ goto no_sysvpd;
+
+ /* Add system VPD */
+ dt_vpd = dt_find_by_path(dt_root, "/vpd");
+ if (dt_vpd) {
+ dt_add_property(dt_vpd, "ibm,vpd", sysvpd, sysvpd_sz);
+ slca_vpd_add_loc_code(dt_vpd, be16_to_cpu(fru_id->slca_index));
+ }
+
+ model = vpd_find(sysvpd, sysvpd_sz, "VSYS", "TM", &sz);
+ if (!model)
+ goto no_sysvpd;
+ str = zalloc(sz + 1);
+ if (!str)
+ goto no_sysvpd;
+ memcpy(str, model, sz);
+ dt_add_property_string(dt_root, "model", str);
+ mi = machine_info_lookup(str);
+ if (mi)
+ dt_add_property_string(dt_root, "model-name", mi->name);
+ free(str);
+ dt_add_property_string(dt_root, "vendor", "IBM");
+
+ system_id = vpd_find(sysvpd, sysvpd_sz, "VSYS", "SE", &sz);
+ if (!system_id)
+ goto no_sysid;
+ str = zalloc(sz + 1);
+ if (!str)
+ goto no_sysid;
+ memcpy(str, system_id, sz);
+ dt_add_property_string(dt_root, "system-id", str);
+ free(str);
+
+ brand = vpd_find(sysvpd, sysvpd_sz, "VSYS", "BR", &sz);
+ if (!brand)
+ goto no_brand;
+ str = zalloc(sz + 1);
+ if (!str)
+ goto no_brand;
+ memcpy(str, brand, sz);
+ dt_add_property_string(dt_root, "system-brand", str);
+ free(str);
+
+ return;
+
+no_brand:
+ dt_add_property_string(dt_root, "system-brand", "Unknown");
+ return;
+
+no_sysid:
+ dt_add_property_string(dt_root, "system-id", "Unknown");
+ return;
+
+no_sysvpd:
+ dt_add_property_string(dt_root, "model", "Unknown");
+}
+
+static void iokid_vpd_parse(const struct HDIF_common_hdr *iohub_hdr)
+{
+ const struct HDIF_child_ptr *iokids;
+ const struct HDIF_common_hdr *iokid;
+ unsigned int i;
+
+ iokids = HDIF_child_arr(iohub_hdr, CECHUB_CHILD_IO_KIDS);
+ if (!CHECK_SPPTR(iokids)) {
+ prerror("VPD: No IOKID child array\n");
+ return;
+ }
+
+ for (i = 0; i < be32_to_cpu(iokids->count); i++) {
+ iokid = HDIF_child(iohub_hdr, iokids, i,
+ IOKID_FRU_HDIF_SIG);
+ /* IO KID VPD structure layout is similar to FRUVPD */
+ if (iokid)
+ dt_add_vpd_node(iokid,
+ FRUVPD_IDATA_FRU_ID, FRUVPD_IDATA_KW_VPD);
+ }
+}
+
+static void iohub_vpd_parse(void)
+{
+ const struct HDIF_common_hdr *iohub_hdr;
+ const struct cechub_hub_fru_id *fru_id_data;
+ unsigned int i, vpd_sz, fru_id_sz;
+
+ if (!get_hdif(&spira.ntuples.cec_iohub_fru, CECHUB_FRU_HDIF_SIG)) {
+ prerror("VPD: Could not find IO HUB FRU data\n");
+ return;
+ }
+
+ for_each_ntuple_idx(&spira.ntuples.cec_iohub_fru, iohub_hdr,
+ i, CECHUB_FRU_HDIF_SIG) {
+
+ fru_id_data = HDIF_get_idata(iohub_hdr,
+ CECHUB_FRU_ID_DATA_AREA,
+ &fru_id_sz);
+
+ /* On P8, the IO HUB is on the processor card and we
+ * have a daughter card array
+ */
+ if (fru_id_data &&
+ be32_to_cpu(fru_id_data->card_type) == CECHUB_FRU_TYPE_CPU_CARD) {
+ iokid_vpd_parse(iohub_hdr);
+ continue;
+ }
+
+ /* On P7, the keyword VPD will not be NULL */
+ if (HDIF_get_idata(iohub_hdr,
+ CECHUB_ASCII_KEYWORD_VPD, &vpd_sz))
+ dt_add_vpd_node(iohub_hdr, CECHUB_FRU_ID_DATA,
+ CECHUB_ASCII_KEYWORD_VPD);
+ }
+}
+
+static void _vpd_parse(struct spira_ntuple tuple)
+{
+ const struct HDIF_common_hdr *fruvpd_hdr;
+ unsigned int i;
+
+ if (!get_hdif(&tuple, FRUVPD_HDIF_SIG))
+ return;
+
+ for_each_ntuple_idx(&tuple, fruvpd_hdr, i, FRUVPD_HDIF_SIG)
+ dt_add_vpd_node(fruvpd_hdr,
+ FRUVPD_IDATA_FRU_ID, FRUVPD_IDATA_KW_VPD);
+}
+
+void vpd_parse(void)
+{
+ const struct HDIF_common_hdr *fruvpd_hdr;
+
+ /* Enclosure */
+ _vpd_parse(spira.ntuples.nt_enclosure_vpd);
+
+ /* Backplane */
+ _vpd_parse(spira.ntuples.backplane_vpd);
+
+ /* System VPD uses the VSYS record, so it's special */
+ sysvpd_parse();
+
+ /* clock card -- does this use the FRUVPD sig? */
+ _vpd_parse(spira.ntuples.clock_vpd);
+
+ /* Anchor card */
+ _vpd_parse(spira.ntuples.anchor_vpd);
+
+ /* Op panel -- does this use the FRUVPD sig? */
+ _vpd_parse(spira.ntuples.op_panel_vpd);
+
+ /* External cache FRU vpd -- does this use the FRUVPD sig? */
+ _vpd_parse(spira.ntuples.ext_cache_fru_vpd);
+
+ /* Misc CEC FRU */
+ _vpd_parse(spira.ntuples.misc_cec_fru_vpd);
+
+ /* CEC IO HUB FRU */
+ iohub_vpd_parse();
+
+ /*
+ * SP subsystem -- while the rest of the SPINFO structure is
+ * different, the FRU ID data and pointer pair to keyword VPD
+ * are at the same offsets as in a FRUVPD entry, so reuse it.
+ */
+ fruvpd_hdr = get_hdif(&spira.ntuples.sp_subsys, SPSS_HDIF_SIG);
+ if (fruvpd_hdr)
+ dt_add_vpd_node(fruvpd_hdr,
+ FRUVPD_IDATA_FRU_ID, FRUVPD_IDATA_KW_VPD);
+}
diff --git a/hw/Makefile.inc b/hw/Makefile.inc
new file mode 100644
index 0000000..14bf8e7
--- /dev/null
+++ b/hw/Makefile.inc
@@ -0,0 +1,15 @@
+# -*-Makefile-*-
+
+SUBDIRS += hw
+HW_OBJS = xscom.o chiptod.o gx.o cec.o lpc.o lpc-uart.o psi.o
+HW_OBJS += homer.o slw.o occ.o nx.o fsi-master.o centaur.o
+HW_OBJS += p7ioc.o p7ioc-inits.o p7ioc-phb.o p5ioc2.o p5ioc2-phb.o
+HW_OBJS += phb3.o sfc-ctrl.o
+HW=hw/built-in.o
+
+include $(SRC)/hw/fsp/Makefile.inc
+include $(SRC)/hw/ec/Makefile.inc
+include $(SRC)/hw/ast-bmc/Makefile.inc
+
+$(HW): $(HW_OBJS:%=hw/%) $(FSP) $(EC) $(AST_BMC)
+
diff --git a/hw/ast-bmc/Makefile.inc b/hw/ast-bmc/Makefile.inc
new file mode 100644
index 0000000..a97c0db
--- /dev/null
+++ b/hw/ast-bmc/Makefile.inc
@@ -0,0 +1,5 @@
+SUBDIRS += hw/ast-bmc
+
+AST_BMC_OBJS = ast-io.o ast-sf-ctrl.o
+AST_BMC = hw/ast-bmc/built-in.o
+$(AST_BMC): $(AST_BMC_OBJS:%=hw/ast-bmc/%)
diff --git a/hw/ast-bmc/ast-io.c b/hw/ast-bmc/ast-io.c
new file mode 100644
index 0000000..e89bf7f
--- /dev/null
+++ b/hw/ast-bmc/ast-io.c
@@ -0,0 +1,324 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Note about accesses to the AST2400 internal memory map:
+ *
+ * There are two ways to generate accesses to the AHB bus of the AST2400
+ * from the host: the LPC->AHB bridge and the iLPC->AHB bridge.
+ *
+ * LPC->AHB bridge
+ * ---------------
+ *
+ * This bridge directly converts memory or firmware accesses using
+ * a set of registers for establishing a remapping window. We prefer
+ * using FW space as normal memory space is limited to byte accesses
+ * to a fixed 256M window, while FW space allows us to use different
+ * access sizes and to control the IDSEL bits which essentially enable
+ * a full 4G address space.
+ *
+ * The way FW accesses map onto AHB is controlled via two registers
+ * in the BMC's LPC host controller:
+ *
+ * HICR7 at 0x1e789088 [31:16] : ADRBASE
+ * [15:00] : HWMBASE
+ *
+ * HICR8 at 0x1e78908c [31:16] : ADRMASK
+ * [15:00] : HWNCARE
+ *
+ * All decoding/remapping happens on the top 16 bits of the LPC address
+ * named LPC_ADDR as follows:
+ *
+ * - For decoding, LPC_ADDR bits are compared with HWMBASE if the
+ * corresponding bit in HWNCARE is 0.
+ *
+ * - For remapping, the AHB address is constructed by taking bits
+ * from LPC_ADDR if the corresponding bit in ADRMASK is 0 or in
+ * ADRBASE if the corresponding bit in ADRMASK is 1
+ *
+ * Example of 2MB SPI flash, LPC 0xFCE00000~0xFCFFFFFF onto
+ * AHB 0x30000000~0x301FFFFF (SPI flash)
+ *
+ * ADRBASE=0x3000 HWMBASE=0xFCE0
+ * ADRMASK=0xFFE0 HWNCARE=0x001F
+ *
+ * This comes pre-configured by the BMC or HostBoot to access the PNOR
+ * flash from IDSEL 0 as follows:
+ *
+ * ADRBASE=0x3000 HWMBASE=0x0e00
+ * ADRMASK=0xfe00 HWNCARE=0x01ff
+ *
+ * Which means mapping of LPC 0x0e000000..0x0fffffff onto
+ * AHB 0x30000000..0x31ffffff
+ *
+ * iLPC->AHB bridge
+ * ----------------
+ *
+ * This bridge is hosted in the SuperIO part of the BMC and is
+ * controlled by a series of byte-sized registers accessed indirectly
+ * via IO ports 0x2e and 0x2f.
+ *
+ * Via these, byte by byte, we can construct an AHB address and
+ * fill a data buffer to trigger a write cycle, or we can do a
+ * read cycle and read back the data, byte after byte.
+ *
+ * This is fairly convoluted and slow but works regardless of what
+ * mapping was established in the LPC->AHB bridge.
+ *
+ * For the time being, we use the iLPC->AHB for everything except
+ * pnor accesses. In the long run, we will reconfigure the LPC->AHB
+ * to provide more direct access to all of the BMC address space but
+ * we'll only do that after the boot script/program on the BMC is
+ * updated to restore the bridge to a state compatible with the SBE
+ * expectations on boot.
+ */
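+
+/*
+ * Worked example of the decode/remap rules above, on the top 16 bits
+ * of the LPC address (a sketch of the hardware behaviour, not code
+ * used below):
+ *
+ *   match  = ((lpc >> 16) & ~HWNCARE) == (HWMBASE & ~HWNCARE);
+ *   ahb_hi = (ADRBASE & ADRMASK) | ((lpc >> 16) & ~ADRMASK);
+ *   ahb    = (ahb_hi << 16) | (lpc & 0xffff);
+ *
+ * With ADRBASE=0x3000/ADRMASK=0xfe00, LPC 0x0e000000 gives
+ * ahb_hi = 0x3000, i.e. AHB 0x30000000, matching the window above.
+ */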
+
+#include <skiboot.h>
+#include <lpc.h>
+#include <lock.h>
+
+#include "ast.h"
+
+static struct lock bmc_sio_lock = LOCK_UNLOCKED;
+
+/*
+ * SuperIO indirect accesses
+ */
+static void bmc_sio_outb(uint8_t val, uint8_t reg)
+{
+ lpc_outb(reg, 0x2e);
+ lpc_outb(val, 0x2f);
+}
+
+static uint8_t bmc_sio_inb(uint8_t reg)
+{
+ lpc_outb(reg, 0x2e);
+ return lpc_inb(0x2f);
+}
+
+/*
+ * AHB accesses via iLPC->AHB in SuperIO. Works on byteswapped
+ * values (ie. Little Endian registers)
+ */
+static void bmc_sio_ahb_prep(uint32_t reg, uint8_t type)
+{
+ /* Address */
+ bmc_sio_outb((reg >> 24) & 0xff, 0xf0);
+ bmc_sio_outb((reg >> 16) & 0xff, 0xf1);
+ bmc_sio_outb((reg >> 8) & 0xff, 0xf2);
+ bmc_sio_outb((reg ) & 0xff, 0xf3);
+
+ /* Cycle type (byte count) */
+ bmc_sio_outb(type, 0xf8);
+}
+
+static void bmc_sio_ahb_writel(uint32_t val, uint32_t reg)
+{
+ lock(&bmc_sio_lock);
+
+ bmc_sio_ahb_prep(reg, 2);
+
+ /* Write data */
+ bmc_sio_outb(val >> 24, 0xf4);
+ bmc_sio_outb(val >> 16, 0xf5);
+ bmc_sio_outb(val >> 8, 0xf6);
+ bmc_sio_outb(val , 0xf7);
+
+ /* Trigger */
+ bmc_sio_outb(0xcf, 0xfe);
+
+ unlock(&bmc_sio_lock);
+}
+
+static uint32_t bmc_sio_ahb_readl(uint32_t reg)
+{
+ uint32_t val = 0;
+
+ lock(&bmc_sio_lock);
+
+ bmc_sio_ahb_prep(reg, 2);
+
+ /* Trigger */
+ bmc_sio_inb(0xfe);
+
+ /* Read results */
+ val = (val << 8) | bmc_sio_inb(0xf4);
+ val = (val << 8) | bmc_sio_inb(0xf5);
+ val = (val << 8) | bmc_sio_inb(0xf6);
+ val = (val << 8) | bmc_sio_inb(0xf7);
+
+ unlock(&bmc_sio_lock);
+
+ return val;
+}
+
+static void bmc_sio_ahb_init(void)
+{
+ /* Send SuperIO password */
+ lpc_outb(0xa5, 0x2e);
+ lpc_outb(0xa5, 0x2e);
+
+ /* Select logical dev d */
+ bmc_sio_outb(0x0d, 0x07);
+
+ /* Enable iLPC->AHB */
+ bmc_sio_outb(0x01, 0x30);
+
+ /* We leave the SuperIO enabled and unlocked for
+ * subsequent accesses.
+ */
+}
+
+/*
+ * External API
+ *
+ * We only support 4-byte accesses to all of AHB. We additionally
+ * support 1-byte accesses to the flash area only.
+ *
+ * We could support all access sizes via iLPC but we don't need
+ * that for now.
+ */
+#define PNOR_AHB_ADDR 0x30000000
+#define PNOR_LPC_OFFSET 0x0e000000
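+/*
+ * These correspond to the LPC->AHB window programmed into HICR7/HICR8
+ * (see the comment at the top of this file and ast_io_init() below),
+ * so the copy routines translate an AHB flash address into LPC FW
+ * space with: off = reg - PNOR_AHB_ADDR + PNOR_LPC_OFFSET.
+ */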
+
+void ast_ahb_writel(uint32_t val, uint32_t reg)
+{
+ /* For now, always use iLPC->AHB, it will byteswap */
+ bmc_sio_ahb_writel(val, reg);
+}
+
+uint32_t ast_ahb_readl(uint32_t reg)
+{
+ /* For now, always use iLPC->AHB, it will byteswap */
+ return bmc_sio_ahb_readl(reg);
+}
+
+int ast_copy_to_ahb(uint32_t reg, const void *src, uint32_t len)
+{
+ /* Check we don't cross IDSEL segments */
+ if ((reg ^ (reg + len - 1)) >> 28)
+ return -EINVAL;
+
+ /* SPI flash, use LPC->AHB bridge */
+ if ((reg >> 28) == (PNOR_AHB_ADDR >> 28)) {
+ uint32_t chunk, off = reg - PNOR_AHB_ADDR + PNOR_LPC_OFFSET;
+ int64_t rc;
+
+ while (len) {
+ /* Choose access size */
+ if (len > 3 && !(off & 3)) {
+ rc = lpc_write(OPAL_LPC_FW, off,
+ *(uint32_t *)src, 4);
+ chunk = 4;
+ } else {
+ rc = lpc_write(OPAL_LPC_FW, off,
+ *(uint8_t *)src, 1);
+ chunk = 1;
+ }
+ if (rc) {
+ prerror("AST_IO: lpc_write.sb failure %lld"
+ " to FW 0x%08x\n", rc, off);
+ return rc;
+ }
+ len -= chunk;
+ off += chunk;
+ src += chunk;
+ }
+ return 0;
+ }
+
+ /* Otherwise we don't do byte access (... yet) */
+ prerror("AST_IO: Attempted write bytes access to %08x\n", reg);
+ return -EINVAL;
+}
+
+int ast_copy_from_ahb(void *dst, uint32_t reg, uint32_t len)
+{
+ /* Check we don't cross IDSEL segments */
+ if ((reg ^ (reg + len - 1)) >> 28)
+ return -EINVAL;
+
+ /* SPI flash, use LPC->AHB bridge */
+ if ((reg >> 28) == (PNOR_AHB_ADDR >> 28)) {
+ uint32_t chunk, off = reg - PNOR_AHB_ADDR + PNOR_LPC_OFFSET;
+ int64_t rc;
+
+ while (len) {
+ uint32_t dat;
+
+ /* Choose access size */
+ if (len > 3 && !(off & 3)) {
+ rc = lpc_read(OPAL_LPC_FW, off, &dat, 4);
+ if (!rc)
+ *(uint32_t *)dst = dat;
+ chunk = 4;
+ } else {
+ rc = lpc_read(OPAL_LPC_FW, off, &dat, 1);
+ if (!rc)
+ *(uint8_t *)dst = dat;
+ chunk = 1;
+ }
+ if (rc) {
+ prerror("AST_IO: lpc_read.sb failure %lld"
+ " to FW 0x%08x\n", rc, off);
+ return rc;
+ }
+ len -= chunk;
+ off += chunk;
+ dst += chunk;
+ }
+ return 0;
+ }
+ /* Otherwise we don't do byte access (... yet) */
+ prerror("AST_IO: Attempted read bytes access to %08x\n", reg);
+ return -EINVAL;
+}
+
+void ast_io_init(void)
+{
+ /* Initialize iLPC->AHB bridge */
+ bmc_sio_ahb_init();
+
+ /* Configure the LPC->AHB bridge for PNOR access (just in case) */
+ bmc_sio_ahb_writel(0x30000e00, LPC_HICR7);
+ bmc_sio_ahb_writel(0xfe0001ff, LPC_HICR8);
+ bmc_sio_ahb_writel(0x00000500, LPC_HICR6);
+}
+
+/* Setup SuperIO UART 1 */
+void ast_setup_uart1(uint16_t io_base, uint8_t irq)
+{
+ /* Send SuperIO password */
+ lpc_outb(0xa5, 0x2e);
+ lpc_outb(0xa5, 0x2e);
+
+ /* Select logical dev 2 */
+ bmc_sio_outb(0x02, 0x07);
+
+ /* Disable UART1 for configuration */
+ bmc_sio_outb(0x00, 0x30);
+
+ /* Configure base and interrupt */
+ bmc_sio_outb(io_base >> 8, 0x60);
+ bmc_sio_outb(io_base & 0xff, 0x61);
+ bmc_sio_outb(irq, 0x70);
+ bmc_sio_outb(0x01, 0x71); /* level low */
+
+ /* Enable UART1 */
+ bmc_sio_outb(0x01, 0x30);
+
+ /* Re-lock SuperIO */
+ lpc_outb(0xaa, 0x2e);
+}
diff --git a/hw/ast-bmc/ast-sf-ctrl.c b/hw/ast-bmc/ast-sf-ctrl.c
new file mode 100644
index 0000000..e0d5fcc
--- /dev/null
+++ b/hw/ast-bmc/ast-sf-ctrl.c
@@ -0,0 +1,412 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <libflash/libflash.h>
+#include <libflash/libflash-priv.h>
+
+#include "ast.h"
+
+#ifndef __unused
+#define __unused __attribute__((unused))
+#endif
+
+struct ast_sf_ctrl {
+ /* We have 2 controllers, one for the BMC flash, one for the PNOR */
+ uint8_t type;
+
+ /* Address of the ctrl register */
+ uint32_t ctl_reg;
+
+ /* Control register value for normal commands */
+ uint32_t ctl_val;
+
+ /* Control register value for (fast) reads */
+ uint32_t ctl_read_val;
+
+ /* Address of the flash mapping */
+ uint32_t flash;
+
+ /* Current 4b mode */
+ bool mode_4b;
+
+ /* Callbacks */
+ struct spi_flash_ctrl ops;
+};
+
+static int ast_sf_start_cmd(struct ast_sf_ctrl *ct, uint8_t cmd)
+{
+ /* Switch to user mode, CE# dropped */
+ ast_ahb_writel(ct->ctl_val | 7, ct->ctl_reg);
+
+ /* user mode, CE# active */
+ ast_ahb_writel(ct->ctl_val | 3, ct->ctl_reg);
+
+ /* write cmd */
+ return ast_copy_to_ahb(ct->flash, &cmd, 1);
+}
+
+static void ast_sf_end_cmd(struct ast_sf_ctrl *ct)
+{
+ /* clear CE# */
+ ast_ahb_writel(ct->ctl_val | 7, ct->ctl_reg);
+
+ /* Switch back to read mode */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+}
+
+static int ast_sf_send_addr(struct ast_sf_ctrl *ct, uint32_t addr)
+{
+ const void *ap;
+
+ /* Layout address MSB first in memory */
+ addr = cpu_to_be32(addr);
+
+ /* Send the right amount of bytes */
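+ /* (e.g. in 3-byte mode, address 0x00123456 goes out as 12 34 56) */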
+ ap = (char *)&addr;
+
+ if (ct->mode_4b)
+ return ast_copy_to_ahb(ct->flash, ap, 4);
+ else
+ return ast_copy_to_ahb(ct->flash, ap + 1, 3);
+}
+
+static int ast_sf_cmd_rd(struct spi_flash_ctrl *ctrl, uint8_t cmd,
+ bool has_addr, uint32_t addr, void *buffer,
+ uint32_t size)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+ int rc;
+
+ rc = ast_sf_start_cmd(ct, cmd);
+ if (rc)
+ goto bail;
+ if (has_addr) {
+ rc = ast_sf_send_addr(ct, addr);
+ if (rc)
+ goto bail;
+ }
+ if (buffer && size)
+ rc = ast_copy_from_ahb(buffer, ct->flash, size);
+ bail:
+ ast_sf_end_cmd(ct);
+ return rc;
+}
+
+static int ast_sf_cmd_wr(struct spi_flash_ctrl *ctrl, uint8_t cmd,
+ bool has_addr, uint32_t addr, const void *buffer,
+ uint32_t size)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+ int rc;
+
+ rc = ast_sf_start_cmd(ct, cmd);
+ if (rc)
+ goto bail;
+ if (has_addr) {
+ rc = ast_sf_send_addr(ct, addr);
+ if (rc)
+ goto bail;
+ }
+ if (buffer && size)
+ rc = ast_copy_to_ahb(ct->flash, buffer, size);
+ bail:
+ ast_sf_end_cmd(ct);
+ return rc;
+}
+
+static int ast_sf_set_4b(struct spi_flash_ctrl *ctrl, bool enable)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+
+ if (ct->type != AST_SF_TYPE_PNOR)
+ return enable ? FLASH_ERR_4B_NOT_SUPPORTED : 0;
+
+ /*
+ * We update the "old" value as well since, when quitting, we
+ * don't restore the mode of the flash itself, so we need to
+ * leave the controller in a compatible setup
+ */
+ if (enable) {
+ ct->ctl_val |= 0x2000;
+ ct->ctl_read_val |= 0x2000;
+ } else {
+ ct->ctl_val &= ~0x2000;
+ ct->ctl_read_val &= ~0x2000;
+ }
+ ct->mode_4b = enable;
+
+ /* Update read mode */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+
+ return 0;
+}
+
+static int ast_sf_read(struct spi_flash_ctrl *ctrl, uint32_t pos,
+ void *buf, uint32_t len)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+
+ /*
+ * We are in read mode by default. We don't yet support fancy
+ * things like fast read or X2 mode
+ */
+ return ast_copy_from_ahb(buf, ct->flash + pos, len);
+}
+
+static int ast_sf_setup(struct spi_flash_ctrl *ctrl, struct flash_info *info,
+ uint32_t *tsize)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+
+ (void)tsize;
+
+ /*
+ * Configure better timings and read mode for known
+ * flash chips
+ */
+ switch (info->id) {
+ case 0xc22019: /* MX25L25635F */
+ case 0xc2201a: /* MX66L51235F */
+ /*
+ * Those Macronix chips support dual IO reads at 104Mhz
+ * with 8 dummy cycles, so let's use HCLK/2, which is 96Mhz.
+ *
+ * We use DREAD (dual read) for now as it defaults to 8
+ * dummy cycles. Eventually we'd like to use 2READ (which
+ * also has the address using 2 IOs) but that defaults
+ * to 6 dummy cycles and we can only do a multiple of bytes
+ * (Note: I think that accounts for the dual IO so a byte is
+ * probably 4 clocks in that mode, but I need to double-check).
+ *
+ * We can change the configuration of the flash so we can
+ * do that later, it's a bit more complex.
+ *
+ * The CE# inactive width for reads must be 7ns, we set it
+ * to 2T which is about 10.4ns.
+ *
+ * For write and program it's 30ns so let's set the value
+ * for normal ops to 6T.
+ *
+ * Preserve the current 4b mode.
+ */
+ ct->ctl_read_val = (ct->ctl_read_val & 0x2000) |
+ (0x02 << 28) | /* Dual bit data only */
+ (0x0e << 24) | /* CE# width 2T (b1110) */
+ (0x3b << 16) | /* DREAD command */
+ (0x07 << 8) | /* HCLK/2 */
+ (0x01 << 6) | /* 1-byte dummy cycle */
+ (0x01); /* fast read */
+
+ /* Configure SPI flash read timing ? */
+
+ /*
+ * For other commands and writes also increase the SPI clock
+ * to HCLK/2 since the chip supports up to 133Mhz and set
+ * CE# inactive to 6T
+ */
+ ct->ctl_val = (ct->ctl_val & 0x2000) |
+ (0x00 << 28) | /* Single bit */
+ (0x0a << 24) | /* CE# width 6T (b1010) */
+ (0x00 << 16) | /* no command */
+ (0x07 << 8) | /* HCLK/2 */
+ (0x00 << 6) | /* no dummy cycle */
+ (0x00); /* normal read */
+
+ /* Update chip with current read config */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+ break;
+ case 0xef4018: /* W25Q128BV */
+ /*
+ * This Winbond chip supports dual IO reads at 104Mhz
+ * with 8 dummy cycles, so let's use HCLK/2.
+ *
+ * The CE# inactive width for reads must be 10ns, we set it
+ * to 3T which is about 15.6ns.
+ */
+ ct->ctl_read_val =
+ (0x02 << 28) | /* Dual bit data only */
+ (0x0e << 24) | /* CE# width 2T (b1110) */
+ (0x3b << 16) | /* DREAD command */
+ (0x07 << 8) | /* HCLK/2 */
+ (0x01 << 6) | /* 1-byte dummy cycle */
+ (0x01); /* fast read */
+
+ /* Configure SPI flash read timing ? */
+
+ /*
+ * For other commands and writes also increase the SPI clock
+ * to HCLK/2 since the chip supports up to 133Mhz. CE# inactive
+ * for write and erase is 50ns so let's set it to 10T.
+ */
+ ct->ctl_val =
+ (0x00 << 28) | /* Single bit */
+ (0x06 << 24) | /* CE# width 10T (b0110) */
+ (0x00 << 16) | /* no command */
+ (0x07 << 8) | /* HCLK/2 */
+ (0x00 << 6) | /* no dummy cycle */
+ (0x01); /* fast read */
+
+ /* Update chip with current read config */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+ break;
+ }
+ return 0;
+}
+
+static bool ast_sf_init_pnor(struct ast_sf_ctrl *ct)
+{
+ uint32_t reg;
+
+ ct->ctl_reg = PNOR_SPI_FCTL_CTRL;
+ ct->flash = PNOR_FLASH_BASE;
+
+ /* Enable writing to the controller */
+ reg = ast_ahb_readl(PNOR_SPI_FCTL_CONF);
+ if (reg == 0xffffffff) {
+ FL_ERR("AST_SF: Failed read from controller config\n");
+ return false;
+ }
+ ast_ahb_writel(reg | 1, PNOR_SPI_FCTL_CONF);
+
+ /*
+ * Snapshot control reg and sanitize it for our
+ * use, switching to 1-bit mode, clearing user
+ * mode if set, etc...
+ *
+ * Also configure SPI clock to something safe
+ * like HCLK/8 (24Mhz)
+ */
+ ct->ctl_val = ast_ahb_readl(ct->ctl_reg);
+ if (ct->ctl_val == 0xffffffff) {
+ FL_ERR("AST_SF: Failed read from controller control\n");
+ return false;
+ }
+
+ ct->ctl_val = (ct->ctl_val & 0x2000) |
+ (0x00 << 28) | /* Single bit */
+ (0x00 << 24) | /* CE# width 16T */
+ (0x00 << 16) | /* no command */
+ (0x04 << 8) | /* HCLK/8 */
+ (0x00 << 6) | /* no dummy cycle */
+ (0x00); /* normal read */
+
+ /* Initial read mode is default */
+ ct->ctl_read_val = ct->ctl_val;
+
+ /* Configure for read */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+
+ if (ct->ctl_val & 0x2000)
+ ct->mode_4b = true;
+ else
+ ct->mode_4b = false;
+
+ return true;
+}
+
+static bool ast_sf_init_bmc(struct ast_sf_ctrl *ct)
+{
+ ct->ctl_reg = BMC_SPI_FCTL_CTRL;
+ ct->flash = BMC_FLASH_BASE;
+
+ /*
+ * Snapshot control reg and sanitize it for our
+ * use, switching to 1-bit mode, clearing user
+ * mode if set, etc...
+ *
+ * Also configure SPI clock to something safe
+ * like HCLK/8 (24Mhz)
+ */
+ ct->ctl_val =
+ (0x00 << 28) | /* Single bit */
+ (0x00 << 24) | /* CE# width 16T */
+ (0x00 << 16) | /* no command */
+ (0x04 << 8) | /* HCLK/8 */
+ (0x00 << 6) | /* no dummy cycle */
+ (0x00); /* normal read */
+
+ /* Initial read mode is default */
+ ct->ctl_read_val = ct->ctl_val;
+
+ /* Configure for read */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+
+ ct->mode_4b = false;
+
+ return true;
+}
+
+int ast_sf_open(uint8_t type, struct spi_flash_ctrl **ctrl)
+{
+ struct ast_sf_ctrl *ct;
+
+ if (type != AST_SF_TYPE_PNOR && type != AST_SF_TYPE_BMC)
+ return -EINVAL;
+
+ *ctrl = NULL;
+ ct = malloc(sizeof(*ct));
+ if (!ct) {
+ FL_ERR("AST_SF: Failed to allocate\n");
+ return -ENOMEM;
+ }
+ memset(ct, 0, sizeof(*ct));
+ ct->type = type;
+ ct->ops.cmd_wr = ast_sf_cmd_wr;
+ ct->ops.cmd_rd = ast_sf_cmd_rd;
+ ct->ops.set_4b = ast_sf_set_4b;
+ ct->ops.read = ast_sf_read;
+ ct->ops.setup = ast_sf_setup;
+
+ if (type == AST_SF_TYPE_PNOR) {
+ if (!ast_sf_init_pnor(ct))
+ goto fail;
+ } else {
+ if (!ast_sf_init_bmc(ct))
+ goto fail;
+ }
+
+ *ctrl = &ct->ops;
+
+ return 0;
+ fail:
+ free(ct);
+ return -EIO;
+}
+
+void ast_sf_close(struct spi_flash_ctrl *ctrl)
+{
+ struct ast_sf_ctrl *ct = container_of(ctrl, struct ast_sf_ctrl, ops);
+
+ /* Restore control reg to read */
+ ast_ahb_writel(ct->ctl_read_val, ct->ctl_reg);
+
+ /* Additional cleanup */
+ if (ct->type == AST_SF_TYPE_PNOR) {
+ uint32_t reg = ast_ahb_readl(PNOR_SPI_FCTL_CONF);
+ if (reg != 0xffffffff)
+ ast_ahb_writel(reg & ~1, PNOR_SPI_FCTL_CONF);
+ }
+
+ /* Free the whole lot */
+ free(ct);
+}
+
diff --git a/hw/cec.c b/hw/cec.c
new file mode 100644
index 0000000..d8d1354
--- /dev/null
+++ b/hw/cec.c
@@ -0,0 +1,84 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <cec.h>
+#include <p7ioc.h>
+#include <p5ioc2.h>
+#include <interrupts.h>
+
+/*
+ * Note: This file is only used on P7/P7+
+ */
+#define MAX_IO_HUBS 0x80
+
+static struct io_hub *cec_iohubs[MAX_IO_HUBS];
+
+struct io_hub *cec_get_hub_by_id(uint32_t hub_id)
+{
+ if (hub_id >= MAX_IO_HUBS)
+ return NULL;
+ return cec_iohubs[hub_id];
+}
+
+void cec_register(struct io_hub *hub)
+{
+ cec_iohubs[hub->hub_id] = hub;
+}
+
+void cec_reset(void)
+{
+ unsigned int i;
+
+ /* Reset IO Hubs */
+ for (i = 0; i < MAX_IO_HUBS; i++) {
+ if (!cec_iohubs[i] || !cec_iohubs[i]->ops->reset)
+ continue;
+ cec_iohubs[i]->ops->reset(cec_iohubs[i]);
+ }
+}
+
+static int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id,
+ uint64_t tce_mem_addr,
+ uint64_t tce_mem_size)
+{
+ struct io_hub *hub = cec_get_hub_by_id(hub_id);
+
+ if (!hub)
+ return OPAL_PARAMETER;
+
+ if (!hub->ops->set_tce_mem)
+ return OPAL_UNSUPPORTED;
+
+ return hub->ops->set_tce_mem(hub, tce_mem_addr, tce_mem_size);
+}
+opal_call(OPAL_PCI_SET_HUB_TCE_MEMORY, opal_pci_set_hub_tce_memory, 3);
+
+static int64_t opal_pci_get_hub_diag_data(uint64_t hub_id,
+ void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct io_hub *hub = cec_get_hub_by_id(hub_id);
+
+ if (!hub)
+ return OPAL_PARAMETER;
+
+ if (!hub->ops->get_diag_data)
+ return OPAL_UNSUPPORTED;
+
+ return hub->ops->get_diag_data(hub, diag_buffer, diag_buffer_len);
+}
+opal_call(OPAL_PCI_GET_HUB_DIAG_DATA, opal_pci_get_hub_diag_data, 3);
diff --git a/hw/centaur.c b/hw/centaur.c
new file mode 100644
index 0000000..20f63cc
--- /dev/null
+++ b/hw/centaur.c
@@ -0,0 +1,326 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <xscom.h>
+#include <processor.h>
+#include <device.h>
+#include <chip.h>
+#include <centaur.h>
+#include <lock.h>
+#include <fsi-master.h>
+
+/*
+ * Centaur chip IDs are using the XSCOM "partID" encoding
+ * described in xscom.h. recap:
+ *
+ * 0b1000.0000.0000.0000.0000.00NN.NCCC.MMMM
+ * N=Node, C=Chip, M=Memory Channel
+ *
+ * We currently use FSI exclusively for centaur access. We can
+ * start using MMIO on Centaur DD2.x when we have a way to handle
+ * machine checks happening inside Sapphire, which we don't at the
+ * moment.
+ */
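+
+/*
+ * For example, a part ID decodes as follows (this mirrors the
+ * open-coded logic in centaur_get() and centaur_add() below):
+ *
+ *   hchip_id = (part_id & 0x0fffffff) >> 4;   /* node + chip */
+ *   mchan    = part_id & 0xf;                 /* memory channel */
+ */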
+struct centaur_chip {
+ bool valid;
+ uint8_t ec_level;
+ uint32_t fsi_master_chip_id;
+ uint32_t fsi_master_port;
+ uint32_t fsi_master_engine;
+ struct lock lock;
+};
+
+/* Is that correct ? */
+#define MAX_CENTAURS_PER_CHIP 8
+
+/*
+ * FSI2PIB register definitions (this could be moved out if we were to
+ * support FSI master to other chips.
+ */
+#define FSI_DATA0_REG 0x1000
+#define FSI_DATA1_REG 0x1004
+#define FSI_CMD_REG 0x1008
+#define FSI_CMD_WR 0x80000000
+#define FSI_CMD_RD 0x00000000
+#define FSI_ENG_RESET_REG 0x1018
+#define FSI_STATUS_REG 0x101c
+#define FSI_STATUS_ABORT 0x00100000
+#define FSI_STATUS_ERRORS 0x00007000
+
+static int64_t centaur_fsiscom_complete(struct centaur_chip *centaur)
+{
+ int64_t rc;
+ uint32_t stat;
+
+ rc = mfsi_read(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_STATUS_REG, &stat);
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI read error %lld reading STAT\n", rc);
+ return rc;
+ }
+ if ((stat & (FSI_STATUS_ABORT | FSI_STATUS_ERRORS)) == 0)
+ return OPAL_SUCCESS;
+
+ prerror("CENTAUR: Remote FSI error, stat=0x%08x\n", stat);
+
+ /* XXX Handle recovery */
+
+ return OPAL_HARDWARE;
+}
+
+static int64_t centaur_fsiscom_read(struct centaur_chip *centaur, uint32_t pcb_addr,
+ uint64_t *val)
+{
+ int64_t rc;
+ uint32_t data0, data1;
+
+ rc = mfsi_write(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_CMD_REG, pcb_addr | FSI_CMD_RD);
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI write error %lld writing CMD\n", rc);
+ return rc;
+ }
+
+ rc = centaur_fsiscom_complete(centaur);
+ if (rc)
+ return rc;
+
+ rc = mfsi_read(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_DATA0_REG, &data0);
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI read error %lld reading DATA0\n", rc);
+ return rc;
+ }
+ rc = mfsi_read(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_DATA1_REG, &data1);
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI read error %lld readking DATA1\n", rc);
+ return rc;
+ }
+
+ *val = (((uint64_t)data0) << 32) | data1;
+
+ return OPAL_SUCCESS;
+}
+
+static struct centaur_chip *centaur_get(uint32_t part_id)
+{
+ uint32_t hchip_id, mchan;
+ struct proc_chip *hchip;
+ struct centaur_chip *centaur;
+
+ if ((part_id >> 28) != 8) {
+ prerror("CENTAUR: Invalid part ID 0x%x\n", part_id);
+ return NULL;
+ }
+ hchip_id = (part_id & 0x0fffffff) >> 4;
+ mchan = part_id & 0xf;
+
+ hchip = get_chip(hchip_id);
+ if (!hchip) {
+ prerror("CENTAUR: Centaur 0x%x not found on non-existing chip 0%x\n",
+ part_id, hchip_id);
+ return NULL;
+ }
+ if (mchan >= MAX_CENTAURS_PER_CHIP) {
+ prerror("CENTAUR: Centaur 0x%x channel out of bounds !\n", part_id);
+ return NULL;
+ }
+ if (!hchip->centaurs) {
+ prerror("CENTAUR: Centaur 0x%x not found on chip 0%x (no centaurs)\n",
+ part_id, hchip_id);
+ return NULL;
+ }
+ centaur = &hchip->centaurs[mchan];
+ if (!centaur->valid) {
+ prerror("CENTAUR: Centaur 0x%x not valid on chip 0%x\n",
+ part_id, hchip_id);
+ return NULL;
+ }
+ return centaur;
+}
+
+static int64_t centaur_fsiscom_write(struct centaur_chip *centaur, uint32_t pcb_addr,
+ uint64_t val)
+{
+ int64_t rc;
+
+ rc = mfsi_write(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_DATA0_REG, hi32(val));
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI write error %lld writing DATA0\n", rc);
+ return rc;
+ }
+ rc = mfsi_write(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_DATA1_REG, lo32(val));
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI write error %lld writing DATA1\n", rc);
+ return rc;
+ }
+ rc = mfsi_write(centaur->fsi_master_chip_id, centaur->fsi_master_engine,
+ centaur->fsi_master_port, FSI_CMD_REG, pcb_addr | FSI_CMD_WR);
+ if (rc) {
+ /* XXX Improve logging */
+ prerror("CENTAUR: MFSI write error %lld writing CMD\n", rc);
+ return rc;
+ }
+
+ return centaur_fsiscom_complete(centaur);
+}
+
+int64_t centaur_xscom_read(uint32_t id, uint64_t pcb_addr, uint64_t *val)
+{
+ struct centaur_chip *centaur = centaur_get(id);
+ int64_t rc;
+
+ if (!centaur)
+ return OPAL_PARAMETER;
+
+ lock(&centaur->lock);
+ rc = centaur_fsiscom_read(centaur, pcb_addr, val);
+ unlock(&centaur->lock);
+
+ return rc;
+}
+
+int64_t centaur_xscom_write(uint32_t id, uint64_t pcb_addr, uint64_t val)
+{
+ struct centaur_chip *centaur = centaur_get(id);
+ int64_t rc;
+
+ if (!centaur)
+ return OPAL_PARAMETER;
+
+ lock(&centaur->lock);
+ rc = centaur_fsiscom_write(centaur, pcb_addr, val);
+ unlock(&centaur->lock);
+
+ return rc;
+}
+
+static bool centaur_check_id(struct centaur_chip *centaur)
+{
+ int64_t rc;
+ uint64_t val;
+
+ rc = centaur_fsiscom_read(centaur, 0xf000f, &val);
+ if (rc) {
+ prerror("CENTAUR: FSISCOM error %lld reading ID register\n",
+ rc);
+ return false;
+ }
+
+ /* Extract CFAM id */
+ val >>= 44;
+
+ /* Identify chip */
+ if ((val & 0xff) != 0xe9) {
+ prerror("CENTAUR: CFAM ID 0x%02x is not a Centaur !\n",
+ (unsigned int)(val & 0xff));
+ return false;
+ }
+
+ /* Get EC level from CFAM ID */
+ centaur->ec_level = ((val >> 16) & 0xf) << 4;
+ centaur->ec_level |= (val >> 8) & 0xf;
+
+ return true;
+}
+
+static bool centaur_add(uint32_t part_id, uint32_t mchip, uint32_t meng,
+ uint32_t mport)
+{
+ uint32_t hchip_id, mchan;
+ struct proc_chip *hchip;
+ struct centaur_chip *centaur;
+
+ if ((part_id >> 28) != 8) {
+ prerror("CENTAUR: Invalid part ID 0x%x\n", part_id);
+ return false;
+ }
+ hchip_id = (part_id & 0x0fffffff) >> 4;
+ mchan = part_id & 0xf;
+
+ printf("CENTAUR: Found centaur for chip 0x%x channel %d\n",
+ hchip_id, mchan);
+ printf("CENTAUR: FSI host: 0x%x cMFSI%d port %d\n",
+ mchip, meng, mport);
+
+ hchip = get_chip(hchip_id);
+ if (!hchip) {
+ prerror("CENTAUR: No such chip !!!\n");
+ return false;
+ }
+
+ if (mchan >= MAX_CENTAURS_PER_CHIP) {
+ prerror("CENTAUR: Channel out of bounds !\n");
+ return false;
+ }
+
+ if (!hchip->centaurs) {
+ hchip->centaurs =
+ zalloc(sizeof(struct centaur_chip) *
+ MAX_CENTAURS_PER_CHIP);
+ assert(hchip->centaurs);
+ }
+
+ centaur = &hchip->centaurs[mchan];
+ if (centaur->valid) {
+ prerror("CENTAUR: Duplicate centaur !\n");
+ return false;
+ }
+ centaur->fsi_master_chip_id = mchip;
+ centaur->fsi_master_port = mport;
+ centaur->fsi_master_engine = meng ? MFSI_cMFSI1 : MFSI_cMFSI0;
+ init_lock(&centaur->lock);
+
+ if (!centaur_check_id(centaur))
+ return false;
+
+ printf("CENTAUR: ChipID 0x%x [DD%x.%x]\n", part_id,
+ centaur->ec_level >> 4,
+ centaur->ec_level & 0xf);
+
+ centaur->valid = true;
+ return true;
+}
+
+void centaur_init(void)
+{
+ struct dt_node *cn;
+
+ dt_for_each_compatible(dt_root, cn, "ibm,centaur-v10") {
+ uint32_t chip_id, mchip, meng, mport;
+
+ chip_id = dt_prop_get_u32(cn, "ibm,chip-id");
+ mchip = dt_prop_get_u32(cn, "ibm,fsi-master-chip-id");
+ meng = dt_prop_get_cell(cn, "ibm,fsi-master-port", 0);
+ mport = dt_prop_get_cell(cn, "ibm,fsi-master-port", 1);
+
+ /*
+ * If adding the centaur succeeds, we expose it to
+ * Linux as a scom-controller
+ */
+ if (centaur_add(chip_id, mchip, meng, mport))
+ dt_add_property(cn, "scom-controller", NULL, 0);
+ }
+}
diff --git a/hw/chiptod.c b/hw/chiptod.c
new file mode 100644
index 0000000..e24d966
--- /dev/null
+++ b/hw/chiptod.c
@@ -0,0 +1,685 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Handle ChipTOD chip & configure core timebases
+ */
+#include <skiboot.h>
+#include <chiptod.h>
+#include <xscom.h>
+#include <io.h>
+#include <cpu.h>
+#include <timebase.h>
+
+//#define DBG(fmt...) printf("CHIPTOD: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+/* TOD chip XSCOM addresses */
+#define TOD_TTYPE_0 0x00040011
+#define TOD_TTYPE_1 0x00040012 /* PSS switch */
+#define TOD_TTYPE_2 0x00040013 /* Enable step checkers */
+#define TOD_TTYPE_3 0x00040014 /* Request TOD */
+#define TOD_TTYPE_4 0x00040015 /* Send TOD */
+#define TOD_TTYPE_5 0x00040016 /* Invalidate TOD */
+#define TOD_CHIPTOD_TO_TB 0x00040017
+#define TOD_LOAD_TOD_MOD 0x00040018
+#define TOD_CHIPTOD_VALUE 0x00040020
+#define TOD_CHIPTOD_LOAD_TB 0x00040021
+#define TOD_CHIPTOD_FSM 0x00040024
+
+/* -- TOD PIB Master reg -- */
+#define TOD_PIB_MASTER 0x00040027
+#define TOD_PIBM_ADDR_CFG_MCAST PPC_BIT(25)
+#define TOD_PIBM_ADDR_CFG_SLADDR_MASK PPC_BITMASK(26,31)
+#define TOD_PIBM_ADDR_CFG_SLADDR_LSH PPC_BITLSHIFT(31)
+
+/* -- TOD Error interrupt register -- */
+#define TOD_ERROR 0x00040030
+/* SYNC errors */
+#define TOD_ERR_CRMO_PARITY PPC_BIT(0)
+#define TOD_ERR_OSC0_PARITY PPC_BIT(1)
+#define TOD_ERR_OSC1_PARITY PPC_BIT(2)
+#define TOD_ERR_CRITC_PARITY PPC_BIT(13)
+#define TOD_ERR_PSS_HAMMING_DISTANCE PPC_BIT(18)
+#define TOD_ERR_DELAY_COMPL_PARITY PPC_BIT(22)
+/* CNTR errors */
+#define TOD_ERR_CTCR_PARITY PPC_BIT(32)
+#define TOD_ERR_TOD_SYNC_CHECK PPC_BIT(33)
+#define TOD_ERR_TOD_FSM_PARITY PPC_BIT(34)
+#define TOD_ERR_TOD_REGISTER_PARITY PPC_BIT(35)
+#define TOD_ERR_OVERFLOW_YR2042 PPC_BIT(36)
+#define TOD_ERR_TOD_WOF_LSTEP_PARITY PPC_BIT(37)
+#define TOD_ERR_TTYPE0_RECVD PPC_BIT(38)
+#define TOD_ERR_TTYPE1_RECVD PPC_BIT(39)
+#define TOD_ERR_TTYPE2_RECVD PPC_BIT(40)
+#define TOD_ERR_TTYPE3_RECVD PPC_BIT(41)
+#define TOD_ERR_TTYPE4_RECVD PPC_BIT(42)
+#define TOD_ERR_TTYPE5_RECVD PPC_BIT(43)
+
+/* Magic TB value. One step cycle ahead of sync */
+#define INIT_TB 0x000000000001ff0
+
+/* Number of iterations for the various timeouts */
+#define TIMEOUT_LOOPS 20000000
+
+static enum chiptod_type {
+ chiptod_unknown,
+ chiptod_p7,
+ chiptod_p8
+} chiptod_type;
+
+static int32_t chiptod_primary = -1;
+static int32_t chiptod_secondary = -1;
+
+/* The base TFMR value is the same for the whole machine
+ * for now as far as I can tell
+ */
+static uint64_t base_tfmr;
+
+/*
+ * For now we use a global lock for runtime chiptod operations;
+ * eventually this should become a per-core lock for wakeup resync,
+ * with all of them taken for RAS cases.
+ */
+static struct lock chiptod_lock = LOCK_UNLOCKED;
+
+static void chiptod_setup_base_tfmr(void)
+{
+ struct dt_node *cpu = this_cpu()->node;
+ uint64_t core_freq, tod_freq;
+ uint64_t mcbs;
+
+ base_tfmr = SPR_TFMR_TB_ECLIPZ;
+
+ /* Get CPU and TOD freqs in Hz */
+ if (dt_has_node_property(cpu,"ibm,extended-clock-frequency", NULL))
+ core_freq = dt_prop_get_u64(cpu,"ibm,extended-clock-frequency");
+ else
+ core_freq = dt_prop_get_u32(cpu, "clock-frequency");
+ tod_freq = 32000000;
+
+ /* Calculate the "Max Cycles Between Steps" value according
+ * to the magic formula:
+ *
+ * mcbs = (core_freq * max_jitter_factor) / (4 * tod_freq) / 100;
+ *
+ * The max jitter factor is set to 240 based on what pHyp uses.
+ */
+ mcbs = (core_freq * 240) / (4 * tod_freq) / 100;
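+
+ /*
+ * Worked example (core frequency hypothetical): a 4 GHz core with
+ * the 32 MHz TOD clock gives
+ * mcbs = (4000000000 * 240) / (4 * 32000000) / 100 = 75 = 0x4b.
+ */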
+ printf("CHIPTOD: Calculated MCBS is 0x%llx (Cfreq=%lld Tfreq=%lld)\n",
+ mcbs, core_freq, tod_freq);
+
+ /* Bake that all into TFMR */
+ base_tfmr = SETFIELD(SPR_TFMR_MAX_CYC_BET_STEPS, base_tfmr, mcbs);
+ base_tfmr = SETFIELD(SPR_TFMR_N_CLKS_PER_STEP, base_tfmr, 0);
+ base_tfmr = SETFIELD(SPR_TFMR_SYNC_BIT_SEL, base_tfmr, 4);
+}
+
+static bool chiptod_mod_tb(void)
+{
+ uint64_t tfmr = base_tfmr;
+ uint64_t timeout = 0;
+
+ /* Switch timebase to "Not Set" state */
+ mtspr(SPR_TFMR, tfmr | SPR_TFMR_LOAD_TOD_MOD);
+ do {
+ if (++timeout >= (TIMEOUT_LOOPS*2)) {
+ prerror("CHIPTOD: TB \"Not Set\" timeout\n");
+ return false;
+ }
+ tfmr = mfspr(SPR_TFMR);
+ if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
+ prerror("CHIPTOD: TB \"Not Set\" TFMR corrupt\n");
+ return false;
+ }
+ if (GETFIELD(SPR_TFMR_TBST_ENCODED, tfmr) == 9) {
+ prerror("CHIPTOD: TB \"Not Set\" TOD in error state\n");
+ return false;
+ }
+ } while(tfmr & SPR_TFMR_LOAD_TOD_MOD);
+
+ return true;
+}
+
+static bool chiptod_interrupt_check(void)
+{
+ uint64_t tfmr = mfspr(SPR_TFMR);
+ uint64_t timeout = 0;
+
+ do {
+ if (++timeout >= TIMEOUT_LOOPS) {
+ prerror("CHIPTOD: Interrupt check fail\n");
+ return false;
+ }
+ tfmr = mfspr(SPR_TFMR);
+ if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
+ prerror("CHIPTOD: Interrupt check TFMR corrupt !\n");
+ return false;
+ }
+ } while(tfmr & SPR_TFMR_CHIP_TOD_INTERRUPT);
+
+ return true;
+}
+
+static bool chiptod_poll_running(void)
+{
+ uint64_t timeout = 0;
+ uint64_t tval;
+
+ /* Chip TOD running check */
+ do {
+ if (++timeout >= TIMEOUT_LOOPS) {
+ prerror("CHIPTOD: Running check fail timeout\n");
+ return false;
+ }
+ if (xscom_readme(TOD_CHIPTOD_FSM, &tval) != 0) {
+ prerror("CHIPTOD: XSCOM error polling run\n");
+ return false;
+ }
+ } while(!(tval & 0x0800000000000000UL));
+
+ return true;
+}
+
+static bool chiptod_to_tb(void)
+{
+ uint64_t tval, tfmr, tvbits;
+ uint64_t timeout = 0;
+
+ /* Tell the ChipTOD about our fabric address
+ *
+ * The pib_master value is calculated from the CPU core ID, given in
+ * the PIR. Because we have different core/thread arrangements in the
+ * PIR between p7 and p8, we need to do the calculation differently.
+ *
+ * p7: 0b00001 || 3-bit core id
+ * p8: 0b0001 || 4-bit core id
+ */
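+ /*
+ * Worked example (PIR value hypothetical): on P8, PIR 0x2a is core 5
+ * ((0x2a >> 3) & 0xf), giving tvbits = 0x10 | 0x5 = 0x15, i.e.
+ * 0b0001 || 0101.
+ */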
+
+ if (xscom_readme(TOD_PIB_MASTER, &tval) != 0) {
+ prerror("CHIPTOD: XSCOM error reading PIB_MASTER\n");
+ return false;
+ }
+ if (chiptod_type == chiptod_p8) {
+ tvbits = (this_cpu()->pir >> 3) & 0xf;
+ tvbits |= 0x10;
+ } else {
+ tvbits = (this_cpu()->pir >> 2) & 0x7;
+ tvbits |= 0x08;
+ }
+ tval &= ~TOD_PIBM_ADDR_CFG_MCAST;
+ tval = SETFIELD(TOD_PIBM_ADDR_CFG_SLADDR, tval, tvbits);
+ if (xscom_writeme(TOD_PIB_MASTER, tval) != 0) {
+ prerror("CHIPTOD: XSCOM error writing PIB_MASTER\n");
+ return false;
+ }
+
+ /* Make us ready to get the TB from the chipTOD */
+ mtspr(SPR_TFMR, base_tfmr | SPR_TFMR_MOVE_CHIP_TOD_TO_TB);
+
+ /* Tell the ChipTOD to send it */
+ if (xscom_writeme(TOD_CHIPTOD_TO_TB, (1ULL << 63)) != 0) {
+ prerror("CHIPTOD: XSCOM error writing CHIPTOD_TO_TB\n");
+ return false;
+ }
+
+ /* Wait for it to complete */
+ timeout = 0;
+ do {
+ if (++timeout >= TIMEOUT_LOOPS) {
+ prerror("CHIPTOD: Chip to TB timeout\n");
+ return false;
+ }
+ tfmr = mfspr(SPR_TFMR);
+ if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
+ prerror("CHIPTOD: MoveToTB: corrupt TFMR !\n");
+ return false;
+ }
+ } while(tfmr & SPR_TFMR_MOVE_CHIP_TOD_TO_TB);
+
+ return true;
+}
+
+static bool chiptod_check_tb_running(void)
+{
+ /* We used to wait for two SYNC pulses in TFMR, but these don't
+ * seem to occur in sim. Instead we use a method similar to
+ * pHyp's: check that SPR_TFMR_TB_VALID is set and
+ * SPR_TFMR_TFMR_CORRUPT is clear.
+ */
+#if 0
+ uint64_t tfmr, timeout;
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ tfmr = mfspr(SPR_TFMR);
+ tfmr &= ~SPR_TFMR_TB_SYNC_OCCURED;
+ mtspr(SPR_TFMR, tfmr);
+ timeout = 0;
+ do {
+ if (++timeout >= TIMEOUT_LOOPS) {
+ prerror("CHIPTOD: No sync pulses\n");
+ return false;
+ }
+ tfmr = mfspr(SPR_TFMR);
+ } while(!(tfmr & SPR_TFMR_TB_SYNC_OCCURED));
+ }
+#else
+ uint64_t tfmr = mfspr(SPR_TFMR);
+
+ return (tfmr & SPR_TFMR_TB_VALID) &&
+ !(tfmr & SPR_TFMR_TFMR_CORRUPT);
+#endif
+ return true;
+}
+
+static void chiptod_reset_tb_errors(void)
+{
+ uint64_t tfmr;
+ unsigned long timeout = 0;
+
+ /* Ask for automatic clear of errors */
+ tfmr = base_tfmr | SPR_TFMR_CLEAR_TB_ERRORS;
+
+ /* Additionally pHyp sets these (write-1-to-clear ?) */
+ tfmr |= SPR_TFMR_TB_MISSING_SYNC;
+ tfmr |= SPR_TFMR_TB_MISSING_STEP;
+ tfmr |= SPR_TFMR_TB_RESIDUE_ERR;
+ mtspr(SPR_TFMR, tfmr);
+
+ /* We have to write "Clear TB Errors" again */
+ tfmr = base_tfmr | SPR_TFMR_CLEAR_TB_ERRORS;
+ mtspr(SPR_TFMR, tfmr);
+
+ do {
+ if (++timeout >= TIMEOUT_LOOPS) {
+ /* Don't actually do anything on error for
+ * now ... not much we can do, panic maybe ?
+ */
+ prerror("CHIPTOD: TB error reset timeout !\n");
+ return;
+ }
+ tfmr = mfspr(SPR_TFMR);
+ if (tfmr & SPR_TFMR_TFMR_CORRUPT) {
+ prerror("CHIPTOD: TB error reset: corrupt TFMR !\n");
+ return;
+ }
+ } while(tfmr & SPR_TFMR_CLEAR_TB_ERRORS);
+}
+
+static void chiptod_cleanup_thread_tfmr(void)
+{
+ uint64_t tfmr = base_tfmr;
+
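+ /*
+ * These bits are presumably write-1-to-clear, like the TB error bits
+ * handled above: wipe any stale per-thread PURR/SPURR/DEC parity,
+ * overflow and TFMR-corrupt latches.
+ */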
+ tfmr |= SPR_TFMR_PURR_PARITY_ERR;
+ tfmr |= SPR_TFMR_SPURR_PARITY_ERR;
+ tfmr |= SPR_TFMR_DEC_PARITY_ERR;
+ tfmr |= SPR_TFMR_TFMR_CORRUPT;
+ tfmr |= SPR_TFMR_PURR_OVERFLOW;
+ tfmr |= SPR_TFMR_SPURR_OVERFLOW;
+ mtspr(SPR_TFMR, tfmr);
+}
+
+static void chiptod_reset_tod_errors(void)
+{
+ uint64_t terr;
+
+ /*
+ * At boot, we clear the errors that the firmware is
+ * supposed to handle. List provided by the pHyp folks.
+ */
+
+ terr = TOD_ERR_CRITC_PARITY;
+ terr |= TOD_ERR_PSS_HAMMING_DISTANCE;
+ terr |= TOD_ERR_DELAY_COMPL_PARITY;
+ terr |= TOD_ERR_CTCR_PARITY;
+ terr |= TOD_ERR_TOD_SYNC_CHECK;
+ terr |= TOD_ERR_TOD_FSM_PARITY;
+ terr |= TOD_ERR_TOD_REGISTER_PARITY;
+
+ if (xscom_writeme(TOD_ERROR, terr) != 0) {
+ prerror("CHIPTOD: XSCOM error writing TOD_ERROR !\n");
+ /* Not much we can do here ... abort ? */
+ }
+}
+
+static void chiptod_sync_master(void *data)
+{
+ bool *result = data;
+
+ printf("CHIPTOD: Master sync on CPU PIR 0x%04x...\n", this_cpu()->pir);
+
+ /* Apply base tfmr */
+ mtspr(SPR_TFMR, base_tfmr);
+
+ /* From recipe provided by pHyp folks, reset various errors
+ * before attempting the sync
+ */
+ chiptod_reset_tb_errors();
+
+ /* Cleanup thread tfmr bits */
+ chiptod_cleanup_thread_tfmr();
+
+ /* Reset errors in the chiptod itself */
+ chiptod_reset_tod_errors();
+
+ /* Switch timebase to "Not Set" state */
+ if (!chiptod_mod_tb())
+ goto error;
+ DBG("SYNC MASTER Step 2 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Chip TOD step checkers enable */
+ if (xscom_writeme(TOD_TTYPE_2, (1UL << 63)) != 0) {
+ prerror("CHIPTOD: XSCOM error enabling steppers\n");
+ goto error;
+ }
+
+ DBG("SYNC MASTER Step 3 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Chip TOD interrupt check */
+ if (!chiptod_interrupt_check())
+ goto error;
+ DBG("SYNC MASTER Step 4 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Switch local chiptod to "Not Set" state */
+ if (xscom_writeme(TOD_LOAD_TOD_MOD, (1UL << 63)) != 0) {
+ prerror("CHIPTOD: XSCOM error sending LOAD_TOD_MOD\n");
+ goto error;
+ }
+
+ /* Switch all remote chiptod to "Not Set" state */
+ if (xscom_writeme(TOD_TTYPE_5, (1UL << 63)) != 0) {
+ prerror("CHIPTOD: XSCOM error sending TTYPE_5\n");
+ goto error;
+ }
+
+ /* Chip TOD load initial value */
+ if (xscom_writeme(TOD_CHIPTOD_LOAD_TB, INIT_TB) != 0) {
+ prerror("CHIPTOD: XSCOM error setting init TB\n");
+ goto error;
+ }
+
+ DBG("SYNC MASTER Step 5 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ if (!chiptod_poll_running())
+ goto error;
+ DBG("SYNC MASTER Step 6 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Move chiptod value to core TB */
+ if (!chiptod_to_tb())
+ goto error;
+ DBG("SYNC MASTER Step 7 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Send local chip TOD to all chips TOD */
+ if (xscom_writeme(TOD_TTYPE_4, (1ULL << 63)) != 0) {
+ prerror("CHIPTOD: XSCOM error sending TTYPE_4\n");
+ goto error;
+ }
+
+ /* Check if TB is running */
+ if (!chiptod_check_tb_running())
+ goto error;
+
+ DBG("Master sync completed, TB=%lx\n", mfspr(SPR_TBRL));
+
+ /*
+ * A little delay to make sure the remote chips get up to
+ * speed before we start syncing them.
+ *
+ * We have to do it here because we know our TB is running
+ * while the boot thread's TB might not be yet.
+ */
+ time_wait_ms(1);
+
+ *result = true;
+ return;
+ error:
+ prerror("CHIPTOD: Master sync failed! TFMR=0x%016lx\n",
+ mfspr(SPR_TFMR));
+ *result = false;
+}
+
+static void chiptod_sync_slave(void *data)
+{
+ bool *result = data;
+
+ /* Only get primaries, not threads */
+ if (this_cpu()->is_secondary) {
+ /* On secondaries we just cleanup the TFMR */
+ chiptod_cleanup_thread_tfmr();
+ *result = true;
+ return;
+ }
+
+ printf("CHIPTOD: Slave sync on CPU PIR 0x%04x...\n", this_cpu()->pir);
+
+ /* Apply base tfmr */
+ mtspr(SPR_TFMR, base_tfmr);
+
+ /* From recipe provided by pHyp folks, reset various errors
+ * before attempting the sync
+ */
+ chiptod_reset_tb_errors();
+
+ /* Cleanup thread tfmr bits */
+ chiptod_cleanup_thread_tfmr();
+
+ /* Switch timebase to "Not Set" state */
+ if (!chiptod_mod_tb())
+ goto error;
+ DBG("SYNC SLAVE Step 2 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Chip TOD running check */
+ if (!chiptod_poll_running())
+ goto error;
+ DBG("SYNC SLAVE Step 3 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Chip TOD interrupt check */
+ if (!chiptod_interrupt_check())
+ goto error;
+ DBG("SYNC SLAVE Step 4 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Move chiptod value to core TB */
+ if (!chiptod_to_tb())
+ goto error;
+ DBG("SYNC SLAVE Step 5 TFMR=0x%016lx\n", mfspr(SPR_TFMR));
+
+ /* Check if TB is running */
+ if (!chiptod_check_tb_running())
+ goto error;
+
+ DBG("Slave sync completed, TB=%lx\n", mfspr(SPR_TBRL));
+
+ *result = true;
+ return;
+ error:
+ prerror("CHIPTOD: Slave sync failed ! TFMR=0x%016lx\n",
+ mfspr(SPR_TFMR));
+ *result = false;
+}
+
+bool chiptod_wakeup_resync(void)
+{
+ lock(&chiptod_lock);
+
+ /* Apply base tfmr */
+ mtspr(SPR_TFMR, base_tfmr);
+
+ /* From recipe provided by pHyp folks, reset various errors
+ * before attempting the sync
+ */
+ chiptod_reset_tb_errors();
+
+ /* Cleanup thread tfmr bits */
+ chiptod_cleanup_thread_tfmr();
+
+ /* Switch timebase to "Not Set" state */
+ if (!chiptod_mod_tb())
+ goto error;
+
+ /* Move chiptod value to core TB */
+ if (!chiptod_to_tb())
+ goto error;
+
+ unlock(&chiptod_lock);
+
+ return true;
+ error:
+ prerror("CHIPTOD: Resync failed ! TFMR=0x%16lx\n", mfspr(SPR_TFMR));
+ unlock(&chiptod_lock);
+ return false;
+}
+
+static int64_t opal_resync_timebase(void)
+{
+ if (!chiptod_wakeup_resync()) {
+ printf("OPAL: Resync timebase failed on CPU 0x%04x\n",
+ this_cpu()->pir);
+ return OPAL_HARDWARE;
+ }
+ return OPAL_SUCCESS;
+}
+opal_call(OPAL_RESYNC_TIMEBASE, opal_resync_timebase, 0);
+
+static void chiptod_print_tb(void *data __unused)
+{
+ printf("CHIPTOD: PIR 0x%04x TB=%lx\n",
+ this_cpu()->pir, mfspr(SPR_TBRL));
+}
+
+static bool chiptod_probe(u32 master_cpu)
+{
+ struct dt_node *np;
+
+ dt_for_each_compatible(dt_root, np, "ibm,power-chiptod") {
+ uint32_t chip;
+
+ /* Old DT has chip-id in chiptod node, newer only in the
+ * parent xscom bridge
+ */
+ chip = dt_get_chip_id(np);
+
+ if (dt_has_node_property(np, "primary", NULL)) {
+ chiptod_primary = chip;
+ if (dt_node_is_compatible(np,"ibm,power7-chiptod"))
+ chiptod_type = chiptod_p7;
+ if (dt_node_is_compatible(np,"ibm,power8-chiptod"))
+ chiptod_type = chiptod_p8;
+ }
+
+ if (dt_has_node_property(np, "secondary", NULL))
+ chiptod_secondary = chip;
+
+ }
+
+ /*
+ * If ChipTOD isn't found in the device-tree, we fall back
+ * to the master CPU passed at OPAL boot, since the
+ * FSP strips off the ChipTOD info from the HDAT when booting
+ * in OPAL mode :-(
+ */
+ if (chiptod_primary < 0) {
+ struct cpu_thread *t = find_cpu_by_pir(master_cpu);
+ printf("CHIPTOD: Cannot find a primary TOD in device-tree\n");
+ printf("CHIPTOD: Falling back to Master CPU: %d\n", master_cpu);
+ if (!t) {
+ prerror("CHIPTOD: NOT FOUND !\n");
+ return false;
+ }
+ chiptod_primary = t->chip_id;
+ switch(proc_gen) {
+ case proc_gen_p7:
+ chiptod_type = chiptod_p7;
+ return true;
+ case proc_gen_p8:
+ chiptod_type = chiptod_p8;
+ return true;
+ default:
+ break;
+ }
+ prerror("CHIPTOD: Unknown fallback CPU type !\n");
+ return false;
+ }
+ if (chiptod_type == chiptod_unknown) {
+ prerror("CHIPTOD: Unknown TOD type !\n");
+ return false;
+ }
+
+ return true;
+}
+
+void chiptod_init(u32 master_cpu)
+{
+ struct cpu_thread *cpu0, *cpu;
+ bool sres;
+
+ op_display(OP_LOG, OP_MOD_CHIPTOD, 0);
+
+ if (!chiptod_probe(master_cpu)) {
+ prerror("CHIPTOD: Failed ChipTOD detection !\n");
+ op_display(OP_FATAL, OP_MOD_CHIPTOD, 0);
+ abort();
+ }
+
+ op_display(OP_LOG, OP_MOD_CHIPTOD, 1);
+
+ /* Pick somebody on the primary */
+ cpu0 = find_cpu_by_chip_id(chiptod_primary);
+
+ /* Calculate the base TFMR value used for everybody */
+ chiptod_setup_base_tfmr();
+
+ printf("CHIPTOD: Base TFMR=0x%016llx\n", base_tfmr);
+
+ /* Schedule master sync */
+ sres = false;
+ cpu_wait_job(cpu_queue_job(cpu0, chiptod_sync_master, &sres), true);
+ if (!sres) {
+ op_display(OP_FATAL, OP_MOD_CHIPTOD, 2);
+ abort();
+ }
+
+ op_display(OP_LOG, OP_MOD_CHIPTOD, 2);
+
+ /* Schedule slave sync */
+ for_each_available_cpu(cpu) {
+ /* Skip master */
+ if (cpu == cpu0)
+ continue;
+
+ /* Queue job */
+ sres = false;
+ cpu_wait_job(cpu_queue_job(cpu, chiptod_sync_slave, &sres),
+ true);
+ if (!sres) {
+ op_display(OP_WARN, OP_MOD_CHIPTOD, 3|(cpu->pir << 8));
+
+ /* Disable threads */
+ cpu_disable_all_threads(cpu);
+ }
+ op_display(OP_LOG, OP_MOD_CHIPTOD, 3|(cpu->pir << 8));
+ }
+
+ /* Display TBs */
+ for_each_available_cpu(cpu) {
+ /* Only do primaries, not threads */
+ if (cpu->is_secondary)
+ continue;
+ cpu_wait_job(cpu_queue_job(cpu, chiptod_print_tb, NULL), true);
+ }
+
+ op_display(OP_LOG, OP_MOD_CHIPTOD, 4);
+}
diff --git a/hw/ec/Makefile.inc b/hw/ec/Makefile.inc
new file mode 100644
index 0000000..09c9c84
--- /dev/null
+++ b/hw/ec/Makefile.inc
@@ -0,0 +1,8 @@
+# -*-Makefile-*-
+# Sapphire EC makefile
+
+SUBDIRS += hw/ec
+EC_OBJS = gpio.o
+EC=hw/ec/built-in.o
+
+$(EC): $(EC_OBJS:%=hw/ec/%)
diff --git a/hw/ec/gpio.c b/hw/ec/gpio.c
new file mode 100644
index 0000000..0a2223d
--- /dev/null
+++ b/hw/ec/gpio.c
@@ -0,0 +1,87 @@
+/* Copyright 2013-2014 Google Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdint.h>
+#include "ec/config.h"
+#include "ec/gpio.h"
+
+int ec_gpio_setup(EcGpioPort port, uint8_t pin,
+ int is_output, int pullup_enable)
+{
+ uint8_t ddr_reg;
+ if (pin > 7) {
+ return -1;
+ }
+
+ /* Set data direction */
+ ec_outb(EC_GPIO_INDEX,
+ port * EC_GPIO_PORT_SKIP + EC_GPIO_DDR_OFFSET);
+ ddr_reg = ec_inb(EC_GPIO_DATA);
+ if (is_output) {
+ ddr_reg |= (1 << pin);
+ } else {
+ ddr_reg &= ~(1 << pin);
+ }
+ ec_outb(EC_GPIO_DATA, ddr_reg);
+
+ /* Set pullup enable for output GPOs */
+ if (is_output)
+ {
+ uint8_t pup_reg;
+ ec_outb(EC_GPIO_INDEX,
+ port * EC_GPIO_PORT_SKIP + EC_GPIO_PUP_OFFSET);
+ pup_reg = ec_inb(EC_GPIO_DATA);
+ if (pullup_enable) {
+ pup_reg |= (1 << pin);
+ } else {
+ pup_reg &= ~(1 << pin);
+ }
+ ec_outb(EC_GPIO_DATA, pup_reg);
+ }
+
+ return 0;
+}
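+
+/*
+ * Typical usage (port and pin hypothetical): configure pin 3 of a port
+ * as an output with its pull-up enabled, then drive it high:
+ *
+ * ec_gpio_setup(port, 3, 1, 1);
+ * ec_gpio_set(port, 3, 1);
+ */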
+
+int ec_gpio_read(EcGpioPort port, uint8_t pin)
+{
+ uint8_t pin_reg;
+ if (pin > 7) {
+ return -1;
+ }
+
+ ec_outb(EC_GPIO_INDEX,
+ port * EC_GPIO_PORT_SKIP + EC_GPIO_PIN_OFFSET);
+ pin_reg = ec_inb(EC_GPIO_DATA);
+ return !!(pin_reg & (1 << pin));
+}
+
+int ec_gpio_set(EcGpioPort port, uint8_t pin, int val)
+{
+ uint8_t data_reg;
+ if (pin > 7) {
+ return -1;
+ }
+
+ ec_outb(EC_GPIO_INDEX,
+ port * EC_GPIO_PORT_SKIP + EC_GPIO_DATA_OFFSET);
+ data_reg = ec_inb(EC_GPIO_DATA);
+ if (val) {
+ data_reg |= (1 << pin);
+ } else {
+ data_reg &= ~(1 << pin);
+ }
+ ec_outb(EC_GPIO_DATA, data_reg);
+ return 0;
+}
diff --git a/hw/ec/makefile b/hw/ec/makefile
new file mode 100644
index 0000000..e6ceafa
--- /dev/null
+++ b/hw/ec/makefile
@@ -0,0 +1,8 @@
+ROOTPATH = ../../..
+MODULE = ec
+
+OBJS = cmosdd.o gpio.o rhesus.o hostboot.o
+
+SUBDIRS = test.d
+
+include ${ROOTPATH}/config.mk
diff --git a/hw/fsi-master.c b/hw/fsi-master.c
new file mode 100644
index 0000000..67d337a
--- /dev/null
+++ b/hw/fsi-master.c
@@ -0,0 +1,297 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <xscom.h>
+#include <lock.h>
+#include <timebase.h>
+#include <chip.h>
+#include <fsi-master.h>
+
+//#define DBG(fmt...) printf("MFSI: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+
+/*
+ * FSI Masters sit on OPB busses behind PIB2OPB bridges
+ *
+ * There are two cMFSIs behind two different bridges at
+ * different XSCOM addresses. For now we don't have them in
+ * the device-tree, so we hard-code the addresses
+ */
+#define PIB2OPB_MFSI0_ADDR 0x20000
+#define PIB2OPB_MFSI1_ADDR 0x30000
+
+/*
+ * Bridge registers on XSCOM that allow generation
+ * of OPB cycles
+ */
+#define PIB2OPB_REG_CMD 0x0
+#define OPB_CMD_WRITE 0x80000000
+#define OPB_CMD_READ 0x00000000
+#define OPB_CMD_8BIT 0x00000000
+#define OPB_CMD_16BIT 0x20000000
+#define OPB_CMD_32BIT 0x60000000
+#define PIB2OPB_REG_STAT 0x1
+#define OPB_STAT_BUSY 0x00010000
+#define OPB_STAT_READ_VALID 0x00020000
+#define OPB_STAT_ERR_OPB 0x09F00000
+#define OPB_STAT_ERR_CMFSI 0x0000FC00
+#define OPB_STAT_ERR_MFSI 0x000000FC
+#define OPB_STAT_ERR_ANY (OPB_STAT_ERR_OPB | \
+ OPB_STAT_ERR_CMFSI | \
+ OPB_STAT_ERR_MFSI)
+#define PIB2OPB_REG_LSTAT 0x2
+
+/*
+ * PIB2OPB 0 has 2 MFSIs, cMFSI and hMFSI, PIB2OPB 1 only
+ * has cMFSI
+ */
+#define cMFSI_OPB_PORT_BASE 0x40000
+#define cMFSI_OPB_REG_BASE 0x03000
+#define hMFSI_OPB_PORT_BASE 0x80000
+#define hMFSI_OPB_REG_BASE 0x03400
+#define MFSI_OPB_PORT_STRIDE 0x08000
+
+
+/*
+ * Use a global FSI lock for now. Beware of re-entrancy
+ * if we ever add support for normal chip XSCOM via FSI, in
+ * which case we'll probably have to consider either per chip
+ * lock (which can have AB->BA deadlock issues) or a re-entrant
+ * global lock
+ */
+static struct lock fsi_lock = LOCK_UNLOCKED;
+static uint32_t mfsi_valid_err = OPB_STAT_ERR_ANY;
+
+/*
+ * OPB accessors
+ */
+
+#define MFSI_OPB_MAX_TRIES 120
+
+static int64_t mfsi_handle_opb_error(uint32_t chip, uint32_t xscom_base,
+ uint32_t stat)
+{
+ int64_t rc;
+
+ prerror("MFSI: Error status=0x%08x !\n", stat);
+
+ /* XXX Dump a bunch of data, create an error log ... */
+
+ /* Clean error */
+ rc = xscom_write(chip, xscom_base + PIB2OPB_REG_STAT, 0);
+ if (rc)
+ prerror("MFSI: XSCOM error %lld clearing status\n", rc);
+
+ /*
+ * XXX HB resets the ports here, but that's broken as it will
+ * re-enter the opb accessors ... the HW is a mess here, it mixes
+ * the OPB stuff with the FSI stuff in horrible ways.
+ * If we want to reset the port and generally handle FSI specific
+ * errors we should do that at the upper level and leave only the
+ * OPB error handling here.
+ *
+ * We probably need to return "stat" to the callers too for that
+ * to work
+ */
+
+ return OPAL_HARDWARE;
+}
+
+static int64_t mfsi_opb_poll(uint32_t chip, uint32_t xscom_base,
+ uint32_t *read_data)
+{
+ unsigned long retries = MFSI_OPB_MAX_TRIES;
+ uint64_t sval;
+ uint32_t stat;
+ int64_t rc;
+
+ /* We try again every 10us for a bit more than 1ms */
+ for (;;) {
+ /* Read OPB status register */
+ rc = xscom_read(chip, xscom_base + PIB2OPB_REG_STAT, &sval);
+ if (rc) {
+ /* Do something here ? */
+ prerror("MFSI: XSCOM error %lld read OPB STAT\n", rc);
+ return rc;
+ }
+ DBG(" STAT=0x%16llx...\n", sval);
+
+ stat = sval >> 32;
+
+ /* Complete */
+ if (!(stat & OPB_STAT_BUSY))
+ break;
+ /* Error */
+ if (stat & mfsi_valid_err)
+ break;
+ if (retries-- == 0) {
+ /* XXX What should we do here ? reset it ? */
+ prerror("MFSI: OPB POLL timeout !\n");
+ return OPAL_HARDWARE;
+ }
+ time_wait_us(10);
+ }
+
+ /* Did we have an error ? */
+ if (stat & mfsi_valid_err)
+ return mfsi_handle_opb_error(chip, xscom_base, stat);
+
+ if (read_data) {
+ if (!(stat & OPB_STAT_READ_VALID)) {
+ prerror("MFSI: Read successful but no data !\n");
+ /* What to do here ? Can it actually happen ? */
+ sval |= 0xffffffff;
+ }
+ *read_data = sval & 0xffffffff;
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t mfsi_opb_read(uint32_t chip, uint32_t xscom_base,
+ uint32_t addr, uint32_t *data)
+{
+ uint64_t opb_cmd = OPB_CMD_READ | OPB_CMD_32BIT;
+ int64_t rc;
+
+ if (addr > 0x00ffffff)
+ return OPAL_PARAMETER;
+
+ opb_cmd |= addr;
+ opb_cmd <<= 32;
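+ /*
+ * e.g. (address hypothetical): a 32-bit read of OPB address 0x1028
+ * issues 0x6000102800000000 -- OPB_CMD_READ | OPB_CMD_32BIT | addr
+ * in the top word.
+ */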
+
+ DBG("MFSI_OPB_READ: Writing 0x%16llx to XSCOM %x\n",
+ opb_cmd, xscom_base);
+
+ rc = xscom_write(chip, xscom_base + PIB2OPB_REG_CMD, opb_cmd);
+ if (rc) {
+ prerror("MFSI: XSCOM error %lld writing OPB CMD\n", rc);
+ return rc;
+ }
+ return mfsi_opb_poll(chip, xscom_base, data);
+}
+
+static int64_t mfsi_opb_write(uint32_t chip, uint32_t xscom_base,
+ uint32_t addr, uint32_t data)
+{
+ uint64_t opb_cmd = OPB_CMD_WRITE | OPB_CMD_32BIT;
+ int64_t rc;
+
+ if (addr > 0x00ffffff)
+ return OPAL_PARAMETER;
+
+ opb_cmd |= addr;
+ opb_cmd <<= 32;
+ opb_cmd |= data;
+
+ DBG("MFSI_OPB_WRITE: Writing 0x%16llx to XSCOM %x\n",
+ opb_cmd, xscom_base);
+
+ rc = xscom_write(chip, xscom_base + PIB2OPB_REG_CMD, opb_cmd);
+ if (rc) {
+ prerror("MFSI: XSCOM error %lld writing OPB CMD\n", rc);
+ return rc;
+ }
+ return mfsi_opb_poll(chip, xscom_base, NULL);
+}
+
+static int64_t mfsi_get_addrs(uint32_t mfsi, uint32_t port,
+ uint32_t *xscom_base, uint32_t *port_base,
+ uint32_t *reg_base)
+{
+ if (port > 7)
+ return OPAL_PARAMETER;
+
+ /* We hard code everything for now */
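+ /*
+ * e.g. (port hypothetical): MFSI_cMFSI0 port 2 maps to the bridge at
+ * XSCOM 0x20000 with port base 0x40000 + 2 * 0x8000 = 0x50000.
+ */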
+ switch(mfsi) {
+ case MFSI_cMFSI0:
+ *xscom_base = PIB2OPB_MFSI0_ADDR;
+ *port_base = cMFSI_OPB_PORT_BASE + port * MFSI_OPB_PORT_STRIDE;
+ *reg_base = cMFSI_OPB_REG_BASE;
+ break;
+ case MFSI_cMFSI1:
+ *xscom_base = PIB2OPB_MFSI1_ADDR;
+ *port_base = cMFSI_OPB_PORT_BASE + port * MFSI_OPB_PORT_STRIDE;
+ *reg_base = cMFSI_OPB_REG_BASE;
+ break;
+ case MFSI_hMFSI0:
+ *xscom_base = PIB2OPB_MFSI0_ADDR;
+ *port_base = hMFSI_OPB_PORT_BASE + port * MFSI_OPB_PORT_STRIDE;
+ *reg_base = hMFSI_OPB_REG_BASE;
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+ return OPAL_SUCCESS;
+}
+
+int64_t mfsi_read(uint32_t chip, uint32_t mfsi, uint32_t port,
+ uint32_t fsi_addr, uint32_t *data)
+{
+ int64_t rc;
+ uint32_t xscom, port_addr, reg;
+
+ rc = mfsi_get_addrs(mfsi, port, &xscom, &port_addr, &reg);
+ if (rc)
+ return rc;
+ lock(&fsi_lock);
+ rc = mfsi_opb_read(chip, xscom, port_addr + fsi_addr, data);
+ /* XXX Handle FSI level errors here, maybe reset port */
+ unlock(&fsi_lock);
+
+ return rc;
+}
+
+int64_t mfsi_write(uint32_t chip, uint32_t mfsi, uint32_t port,
+ uint32_t fsi_addr, uint32_t data)
+{
+ int64_t rc;
+ uint32_t xscom, port_addr, reg;
+
+ rc = mfsi_get_addrs(mfsi, port, &xscom, &port_addr, &reg);
+ if (rc)
+ return rc;
+ lock(&fsi_lock);
+ rc = mfsi_opb_write(chip, xscom, port_addr + fsi_addr, data);
+ /* XXX Handle FSI level errors here, maybe reset port */
+ unlock(&fsi_lock);
+
+ return rc;
+}
+
+void mfsi_init(void)
+{
+ struct proc_chip *chip;
+
+ /* For now assume all chips are the same DD... might need
+ * fixing.
+ */
+ chip = next_chip(NULL);
+ assert(chip);
+ if (chip->type == PROC_CHIP_P8_MURANO) {
+ /* Hardware Bug HW222712 on Murano DD1.0 causes the
+ * any_error bit to be un-clearable so we just
+ * have to ignore it
+ */
+ if (chip->ec_level < 0x20) {
+ /* 16: cMFSI any-master-error */
+ /* 24: hMFSI any-master-error */
+ mfsi_valid_err &= 0xFFFF7F7F;
+ }
+ }
+}
+
diff --git a/hw/fsp/Makefile.inc b/hw/fsp/Makefile.inc
new file mode 100644
index 0000000..c16d060
--- /dev/null
+++ b/hw/fsp/Makefile.inc
@@ -0,0 +1,9 @@
+SUBDIRS += hw/fsp
+
+FSP_OBJS = fsp.o fsp-console.o fsp-rtc.o fsp-nvram.o fsp-sysparam.o
+FSP_OBJS += fsp-surveillance.o fsp-codeupdate.o fsp-sensor.o
+FSP_OBJS += fsp-diag.o fsp-leds.o fsp-mem-err.o fsp-op-panel.o
+FSP_OBJS += fsp-elog-read.o fsp-elog-write.o
+FSP_OBJS += fsp-dump.o fsp-mdst-table.o
+FSP = hw/fsp/built-in.o
+$(FSP): $(FSP_OBJS:%=hw/fsp/%)
diff --git a/hw/fsp/fsp-codeupdate.c b/hw/fsp/fsp-codeupdate.c
new file mode 100644
index 0000000..be705a4
--- /dev/null
+++ b/hw/fsp/fsp-codeupdate.c
@@ -0,0 +1,1197 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <fsp-sysparam.h>
+#include <lock.h>
+#include <codeupdate.h>
+#include <device.h>
+#include <ccan/endian/endian.h>
+#include <fsp-elog.h>
+
+enum flash_state {
+ FLASH_STATE_ABSENT,
+ FLASH_STATE_INVALID, /* IPL side marker lid is invalid */
+ FLASH_STATE_READING,
+ FLASH_STATE_READ,
+};
+
+enum lid_fetch_side {
+ FETCH_T_SIDE_ONLY,
+ FETCH_P_SIDE_ONLY,
+ FETCH_BOTH_SIDE,
+};
+
+static enum flash_state flash_state = FLASH_STATE_INVALID;
+static enum lid_fetch_side lid_fetch_side = FETCH_BOTH_SIDE;
+
+/* Image buffers */
+static struct opal_sg_list *image_data;
+static uint32_t tce_start;
+static void *lid_data;
+static char validate_buf[VALIDATE_BUF_SIZE];
+
+/* TCE buffer lock */
+static struct lock flash_lock = LOCK_UNLOCKED;
+
+/* FW VPD data */
+static struct fw_image_vpd fw_vpd[2];
+
+/* Code update related sys parameters */
+static uint32_t ipl_side;
+static uint32_t hmc_managed;
+static uint32_t update_policy;
+static uint32_t in_flight_params;
+
+/* If non-NULL, this gets called just before rebooting */
+int (*fsp_flash_term_hook)(void);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_GENERAL, OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_FLASH, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_SG_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_COMMIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_MSG, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_NOTIFY, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_CU_MARKER_LID, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static inline void code_update_tce_map(uint32_t tce_offset,
+ void *buffer, uint32_t size)
+{
+ uint32_t tlen = ALIGN_UP(size, TCE_PSIZE);
+
+ fsp_tce_map(PSI_DMA_CODE_UPD + tce_offset, buffer, tlen);
+}
+
+static inline void code_update_tce_unmap(uint32_t size)
+{
+ fsp_tce_unmap(PSI_DMA_CODE_UPD, size);
+}
+
+static inline void set_def_fw_version(uint32_t side)
+{
+ strncpy(fw_vpd[side].MI_keyword, FW_VERSION_UNKNOWN, MI_KEYWORD_SIZE);
+ strncpy(fw_vpd[side].ext_fw_id, FW_VERSION_UNKNOWN, ML_KEYWORD_SIZE);
+}
+
+/*
+ * Get IPL side
+ */
+static void get_ipl_side(void)
+{
+ struct dt_node *iplp;
+ const char *side = NULL;
+
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ side = dt_prop_get_def(iplp, "cec-ipl-side", NULL);
+ printf("CUPD: IPL SIDE = %s\n", side);
+
+ if (!side || !strcmp(side, "temp"))
+ ipl_side = FW_IPL_SIDE_TEMP;
+ else
+ ipl_side = FW_IPL_SIDE_PERM;
+}
+
+
+/*
+ * Helper routines to retrieve code update related
+ * system parameters from FSP.
+ */
+
+static void inc_in_flight_param(void)
+{
+ lock(&flash_lock);
+ in_flight_params++;
+ unlock(&flash_lock);
+}
+
+static void dec_in_flight_param(void)
+{
+ lock(&flash_lock);
+ assert(in_flight_params > 0);
+ in_flight_params--;
+ unlock(&flash_lock);
+}
+
+static void got_code_update_policy(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error "
+ "retrieving code update policy: %d\n", err_len);
+ } else
+ printf("CUPD: Code update policy from FSP: %d\n",
+ update_policy);
+
+ dec_in_flight_param();
+}
+
+static void get_code_update_policy(void)
+{
+ int rc;
+
+ inc_in_flight_param();
+ rc = fsp_get_sys_param(SYS_PARAM_FLASH_POLICY, &update_policy, 4,
+ got_code_update_policy, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+ "CUPD: Error %d queueing param request\n", rc);
+ dec_in_flight_param();
+ }
+}
+
+static void got_platform_hmc_managed(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error "
+ "retrieving hmc managed status: %d\n", err_len);
+ } else
+ printf("CUPD: HMC managed status from FSP: %d\n", hmc_managed);
+
+ dec_in_flight_param();
+}
+
+static void get_platform_hmc_managed(void)
+{
+ int rc;
+
+ inc_in_flight_param();
+ rc = fsp_get_sys_param(SYS_PARAM_HMC_MANAGED, &hmc_managed, 4,
+ got_platform_hmc_managed, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+ "FLASH: Error %d queueing param request\n", rc);
+ dec_in_flight_param();
+ }
+}
+
+static int64_t code_update_check_state(void)
+{
+ switch(flash_state) {
+ case FLASH_STATE_ABSENT:
+ return OPAL_HARDWARE;
+ case FLASH_STATE_INVALID:
+ return OPAL_INTERNAL_ERROR;
+ case FLASH_STATE_READING:
+ return OPAL_BUSY;
+ default:
+ break;
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Get common marker LID additional data section
+ */
+static void *get_adf_sec_data(struct com_marker_adf_sec *adf_sec,
+ uint32_t name)
+{
+ struct com_marker_adf_header *adf_header;
+ int i;
+
+ adf_header = (void *)adf_sec->adf_data;
+ for (i = 0; i < be32_to_cpu(adf_sec->adf_cnt); i++) {
+ if (be32_to_cpu(adf_header->name) == name)
+ return adf_header;
+
+ adf_header = (void *)adf_header + be32_to_cpu(adf_header->size);
+ }
+ return NULL;
+}
+
+/*
+ * Parse common marker LID to get FW version details
+ *
+ * Note:
+ * At present, we are parsing "Service Pack Nomenclature ADF"
+ * section only. If we are adding FW IP support, then we have
+ * to parse "Firmware IP Protection ADF" as well.
+ */
+static void parse_marker_lid(uint32_t side)
+{
+ struct com_marker_header *header;
+ struct com_marker_mi_section *mi_sec;
+ struct com_marker_adf_sec *adf_sec;
+ struct com_marker_adf_sp *adf_sp;
+
+ header = (void *)lid_data;
+
+ /* Get MI details */
+ mi_sec = (void *)header + be32_to_cpu(header->MI_offset);
+ /*
+ * If Marker LID is invalid, then FSP will return a Marker
+ * LID with ASCII zeros for the entire MI keyword.
+ */
+ if (mi_sec->MI_keyword[0] == '0')
+ return;
+
+ strncpy(fw_vpd[side].MI_keyword, mi_sec->MI_keyword, MI_KEYWORD_SIZE);
+ fw_vpd[side].MI_keyword[MI_KEYWORD_SIZE - 1] = '\0';
+ printf("CUPD: %s side MI Keyword = %s\n",
+ side == 0x00 ? "P" : "T", fw_vpd[side].MI_keyword);
+
+ /* Get ML details */
+ adf_sec = (void *)header + be32_to_cpu(mi_sec->adf_offset);
+ adf_sp = get_adf_sec_data(adf_sec, ADF_NAME_SP);
+ if (!adf_sp)
+ return;
+
+ strncpy(fw_vpd[side].ext_fw_id,
+ (void *)adf_sp + be32_to_cpu(adf_sp->sp_name_offset),
+ ML_KEYWORD_SIZE);
+ fw_vpd[side].ext_fw_id[ML_KEYWORD_SIZE - 1] = '\0';
+ printf("CUPD: %s side ML Keyword = %s\n",
+ side == 0x00 ? "P" : "T", fw_vpd[side].ext_fw_id);
+}
+
+static void validate_com_marker_lid(void)
+{
+ if (!strncmp(fw_vpd[ipl_side].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN))) {
+ log_simple_error(&e_info(OPAL_RC_CU_MARKER_LID),
+ "CUPD: IPL side Marker LID is not valid\n");
+ flash_state = FLASH_STATE_INVALID;
+ return;
+ }
+
+ flash_state = FLASH_STATE_READ;
+}
+
+static void fetch_lid_data_complete(struct fsp_msg *msg)
+{
+ void *buffer;
+ size_t length, chunk;
+ uint32_t lid_id, offset;
+ uint16_t id;
+ uint8_t flags, status;
+
+ status = (msg->resp->word1 >> 8) & 0xff;
+ flags = (msg->data.words[0] >> 16) & 0xff;
+ id = msg->data.words[0] & 0xffff;
+ lid_id = msg->data.words[1];
+ offset = msg->resp->data.words[1];
+ length = msg->resp->data.words[2];
+
+ printf("CUPD: Marker LID id : size : status = 0x%x : 0x%x : 0x%x\n",
+ msg->data.words[1], msg->resp->data.words[2], status);
+
+ fsp_freemsg(msg);
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS: /* Read complete, parse VPD */
+ parse_marker_lid(lid_id == P_COM_MARKER_LID_ID ? 0 : 1);
+ break;
+ case FSP_STATUS_MORE_DATA: /* More data left */
+ offset += length;
+ chunk = MARKER_LID_SIZE - offset;
+ if (chunk > 0) {
+ buffer = (void *)PSI_DMA_CODE_UPD + offset;
+ fsp_fetch_data_queue(flags, id, lid_id,
+ offset, buffer, &chunk,
+ fetch_lid_data_complete);
+ return;
+ }
+ break;
+ default: /* Fetch LID call failed */
+ break;
+ }
+
+ /* If required, fetch T side marker LID */
+ if (lid_id == P_COM_MARKER_LID_ID &&
+ lid_fetch_side == FETCH_BOTH_SIDE) {
+ length = MARKER_LID_SIZE;
+ fsp_fetch_data_queue(flags, id, T_COM_MARKER_LID_ID,
+ 0, (void *)PSI_DMA_CODE_UPD,
+ &length, fetch_lid_data_complete);
+ return;
+ }
+
+ lock(&flash_lock);
+
+ /* Validate marker LID data */
+ validate_com_marker_lid();
+ /* TCE unmap */
+ code_update_tce_unmap(MARKER_LID_SIZE);
+
+ unlock(&flash_lock);
+}
+
+static void fetch_com_marker_lid(void)
+{
+ size_t length = MARKER_LID_SIZE;
+ uint32_t lid_id;
+ int rc;
+
+ /* Read in progress? */
+ rc = code_update_check_state();
+ if (rc == OPAL_HARDWARE || rc == OPAL_BUSY)
+ return;
+
+ if (lid_fetch_side == FETCH_T_SIDE_ONLY) {
+ lid_id = T_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_TEMP);
+ } else if (lid_fetch_side == FETCH_P_SIDE_ONLY) {
+ lid_id = P_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_PERM);
+ } else {
+ lid_id = P_COM_MARKER_LID_ID;
+ set_def_fw_version(FW_IPL_SIDE_PERM);
+ set_def_fw_version(FW_IPL_SIDE_TEMP);
+ }
+
+ code_update_tce_map(0, lid_data, length);
+ rc = fsp_fetch_data_queue(0x00, 0x05, lid_id, 0,
+ (void *)PSI_DMA_CODE_UPD, &length,
+ fetch_lid_data_complete);
+ if (!rc)
+ flash_state = FLASH_STATE_READING;
+ else
+ flash_state = FLASH_STATE_INVALID;
+}
+
+/*
+ * Add MI and ML keyword details into DT
+ */
+#define FW_VER_SIZE 64
+static void add_opal_firmware_version(void)
+{
+ struct dt_node *dt_fw;
+ char buffer[FW_VER_SIZE];
+ int offset;
+
+ dt_fw = dt_find_by_path(dt_root, "ibm,opal/firmware");
+ if (!dt_fw)
+ return;
+
+ /* MI version */
+ offset = snprintf(buffer, FW_VER_SIZE, "MI %s %s",
+ fw_vpd[FW_IPL_SIDE_TEMP].MI_keyword,
+ fw_vpd[FW_IPL_SIDE_PERM].MI_keyword);
+ if (ipl_side == FW_IPL_SIDE_TEMP)
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_TEMP].MI_keyword);
+ else
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_PERM].MI_keyword);
+
+ dt_add_property(dt_fw, "mi-version", buffer, strlen(buffer));
+
+ /* ML version */
+ offset = snprintf(buffer, FW_VER_SIZE, "ML %s %s",
+ fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id,
+ fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id);
+ if (ipl_side == FW_IPL_SIDE_TEMP)
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id);
+ else
+ snprintf(buffer + offset, FW_VER_SIZE - offset,
+ " %s", fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id);
+
+ dt_add_property(dt_fw, "ml-version", buffer, strlen(buffer));
+}
+
+/*
+ * This is called right before starting the payload (Linux) to
+ * ensure the common marker LID read and parsing has happened
+ * before we transfer control.
+ */
+void fsp_code_update_wait_vpd(bool is_boot)
+{
+ if (!fsp_present())
+ return;
+
+ printf("CUPD: Waiting read marker LID completion...\n");
+
+ while(flash_state == FLASH_STATE_READING)
+ fsp_poll();
+
+ printf("CUPD: Waiting in flight params completion...\n");
+ while(in_flight_params)
+ fsp_poll();
+
+ if (is_boot)
+ add_opal_firmware_version();
+}
+
+static int code_update_start(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+ uint16_t comp = 0x00; /* All components */
+ uint8_t side = OPAL_COMMIT_TMP_SIDE; /* Temporary side */
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_START, 1, side << 16 | comp);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_START message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
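+ /* The FSP completion status is byte 1 of response word 1; all the
+ * flash commands below extract it the same way. */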
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_write_lid(uint32_t lid_id, uint32_t size)
+{
+ struct fsp_msg *msg;
+ int rc, n_pairs = 1;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_WRITE, 5, lid_id,
+ n_pairs, 0, tce_start, size);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_WRITE message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_del_lid(uint32_t lid_id)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_DEL, 1, lid_id);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_DEL message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_complete(uint32_t cmd)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(cmd, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CUPD COMPLETE message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_swap_side(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_FLASH_SWAP, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_FLASH_SWAP message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static int code_update_set_ipl_side(void)
+{
+ struct fsp_msg *msg;
+ uint8_t side = FW_IPL_SIDE_TEMP; /* Next IPL side */
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_SET_IPL_SIDE, 1, side << 16);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: CMD_SET_IPL_SIDE message allocation failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_sync_msg(msg, false)) {
+ fsp_freemsg(msg);
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: Setting next IPL side failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static void code_update_commit_complete(struct fsp_msg *msg)
+{
+ int rc;
+ uint8_t type;
+
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ type = (msg->word1 >> 8) & 0xff;
+ fsp_freemsg(msg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_COMMIT),
+ "CUPD: Code update commit failed, err 0x%x\n", rc);
+ return;
+ }
+
+ /* Reset cached VPD data */
+ lock(&flash_lock);
+
+ /* Find commit type */
+ if (type == 0x01)
+ lid_fetch_side = FETCH_P_SIDE_ONLY;
+ else if (type == 0x02)
+ lid_fetch_side = FETCH_T_SIDE_ONLY;
+ else
+ lid_fetch_side = FETCH_BOTH_SIDE;
+
+ fetch_com_marker_lid();
+
+ unlock(&flash_lock);
+}
+
+static int code_update_commit(uint32_t cmd)
+{
+ struct fsp_msg *msg;
+
+ msg = fsp_mkmsg(cmd, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_CU_MSG),
+ "CUPD: COMMIT message allocation failed !\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(msg, code_update_commit_complete)) {
+ log_simple_error(&e_info(OPAL_RC_CU_COMMIT),
+ "CUPD: Failed to queue code update commit message\n");
+ fsp_freemsg(msg);
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Inband code update is allowed?
+ */
+static int64_t validate_inband_policy(void)
+{
+ /* Quirk:
+ * If the code update policy is out-of-band, but the system
+ * is not HMC-managed, then inband update is allowed.
+ */
+ if (hmc_managed != PLATFORM_HMC_MANAGED)
+ return 0;
+ if (update_policy == INBAND_UPDATE_ALLOWED)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * Validate magic Number
+ */
+static int64_t validate_magic_num(uint16_t magic)
+{
+ if (magic != IMAGE_MAGIC_NUMBER)
+ return -1;
+ return 0;
+}
+
+/*
+ * Compare MI keyword to make sure candidate image
+ * is valid for this platform.
+ */
+static int64_t validate_image_version(struct update_image_header *header,
+ uint32_t *result)
+{
+ struct fw_image_vpd vpd;
+ int t_valid = 0, p_valid = 0, cton_ver = -1, ptot_ver = -1;
+
+ /* Valid flash image level? */
+ if (strncmp(fw_vpd[0].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN)) != 0)
+ p_valid = 1;
+
+ if (strncmp(fw_vpd[1].MI_keyword, FW_VERSION_UNKNOWN,
+ sizeof(FW_VERSION_UNKNOWN)) != 0)
+ t_valid = 1;
+
+ /* Validate with IPL side image */
+ vpd = fw_vpd[ipl_side];
+
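+ /*
+ * MI keyword layout implied by the checks below (example keyword
+ * hypothetical): in "AB7012345", bytes 0-1 ("AB") identify the
+ * platform, byte 2 ("7") the FW series, and the six bytes from
+ * offset 3 ("012345") the comparable version.
+ */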
+ /* Validate platform identifier (first two char of MI keyword) */
+ if (strncmp(vpd.MI_keyword, header->MI_keyword_data, 2) != 0) {
+ *result = VALIDATE_INVALID_IMG;
+ return OPAL_SUCCESS;
+ }
+
+ /* Don't flash different FW series (like P7 image on P8) */
+ if (vpd.MI_keyword[2] != header->MI_keyword_data[2]) {
+ *result = VALIDATE_INVALID_IMG;
+ return OPAL_SUCCESS;
+ }
+
+ /* Get current to new version difference */
+ cton_ver = strncmp(vpd.MI_keyword + 3, header->MI_keyword_data + 3, 6);
+
+ /* Get P to T version difference */
+ if (t_valid && p_valid)
+ ptot_ver = strncmp(fw_vpd[0].MI_keyword + 3,
+ fw_vpd[1].MI_keyword + 3, 6);
+
+ /* Update validation result */
+ if (ipl_side == FW_IPL_SIDE_TEMP) {
+ if (!ptot_ver && cton_ver > 0) /* downgrade T side */
+ *result = VALIDATE_TMP_UPDATE_DL;
+ else if (!ptot_ver && cton_ver <= 0) /* upgrade T side */
+ *result = VALIDATE_TMP_UPDATE;
+ else if (cton_ver > 0) /* Implied commit & downgrade T side */
+ *result = VALIDATE_TMP_COMMIT_DL;
+ else /* Implied commit & upgrade T side */
+ *result = VALIDATE_TMP_COMMIT;
+ } else {
+ if (!t_valid) /* Current unknown */
+ *result = VALIDATE_CUR_UNKNOWN;
+ else if (cton_ver > 0) /* downgrade FW version */
+ *result = VALIDATE_TMP_UPDATE_DL;
+ else /* upgrade FW version */
+ *result = VALIDATE_TMP_UPDATE;
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Validate candidate image
+ */
+static int validate_candidate_image(uint64_t buffer,
+ uint32_t size, uint32_t *result)
+{
+ struct update_image_header *header;
+ int rc = OPAL_PARAMETER;
+
+ if (size < VALIDATE_BUF_SIZE)
+ goto out;
+
+ rc = code_update_check_state();
+ if (rc != OPAL_SUCCESS)
+ goto out;
+
+ if (validate_inband_policy() != 0) {
+ *result = VALIDATE_FLASH_AUTH;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ memcpy(validate_buf, (void *)buffer, VALIDATE_BUF_SIZE);
+ header = (struct update_image_header *)validate_buf;
+
+ if (validate_magic_num(be16_to_cpu(header->magic)) != 0) {
+ *result = VALIDATE_INVALID_IMG;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+ rc = validate_image_version(header, result);
+out:
+ return rc;
+}
+
+static int validate_out_buf_mi_data(void *buffer, int offset, uint32_t result)
+{
+ struct update_image_header *header = (void *)validate_buf;
+
+ /* Current T & P side MI data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "MI %s %s\n",
+ fw_vpd[1].MI_keyword, fw_vpd[0].MI_keyword);
+
+ /* New T & P side MI data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "MI %s", header->MI_keyword_data);
+ if (result == VALIDATE_TMP_COMMIT_DL ||
+ result == VALIDATE_TMP_COMMIT)
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[1].MI_keyword);
+ else
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[0].MI_keyword);
+ return offset;
+}
+
+static int validate_out_buf_ml_data(void *buffer, int offset, uint32_t result)
+{
+ struct update_image_header *header = (void *)validate_buf;
+ /* Candidate image ML data */
+ char *ext_fw_id = (void *)header->data;
+
+ /* Current T & P side ML data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "ML %s %s\n",
+ fw_vpd[1].ext_fw_id, fw_vpd[0].ext_fw_id);
+
+ /* New T & P side ML data */
+ offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset,
+ "ML %s", ext_fw_id);
+ if (result == VALIDATE_TMP_COMMIT_DL ||
+ result == VALIDATE_TMP_COMMIT)
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[1].ext_fw_id);
+ else
+ offset += snprintf(buffer + offset,
+ VALIDATE_BUF_SIZE - offset,
+ " %s\n", fw_vpd[0].ext_fw_id);
+
+ return offset;
+}
+
+/*
+ * Copy LID data to TCE buffer
+ */
+static int get_lid_data(struct opal_sg_list *list,
+ int lid_size, int lid_offset)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *entry;
+ int length, num_entries, i, buf_pos = 0;
+ int map_act, map_size;
+ bool last = false;
+
+ /* Reset TCE start address */
+ tce_start = 0;
+
+ for (sg = list; sg; sg = sg->next) {
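+ /*
+ * sg->length carries the SG list version in its top byte; mask that
+ * off and subtract the 16-byte list header to get the space used by
+ * the entries themselves.
+ */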
+ length = (sg->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return -1;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+ /*
+ * Continue until we get data block which
+ * contains LID data
+ */
+ if (lid_offset > entry->length) {
+ lid_offset -= entry->length;
+ continue;
+ }
+
+ /*
+ * SG list entry size can be more than 4k.
+ * Map only required pages, instead of
+ * mapping entire entry.
+ */
+ map_act = entry->length;
+ map_size = entry->length;
+
+ /* First TCE mapping */
+ if (!tce_start) {
+ tce_start = PSI_DMA_CODE_UPD +
+ (lid_offset & 0xfff);
+ map_act = entry->length - lid_offset;
+ lid_offset &= ~0xfff;
+ map_size = entry->length - lid_offset;
+ }
+
+ /* Check pending LID size to map */
+ if (lid_size <= map_act) {
+ /* (map_size - map_act) is the offset of the
+ * TCE start from the page start; it matters
+ * when the LID size is <= 4k.
+ */
+ map_size = (map_size - map_act) + lid_size;
+ last = true;
+ }
+
+ /* Adjust remaining size to map */
+ lid_size -= map_act;
+
+ /* TCE mapping */
+ code_update_tce_map(buf_pos, entry->data + lid_offset,
+ map_size);
+ buf_pos += map_size;
+ /* Reset LID offset count */
+ lid_offset = 0;
+
+ if (last)
+ return OPAL_SUCCESS;
+ }
+ } /* outer loop */
+ return -1;
+}
+
+/*
+ * If IPL side is T, then swap P & T sides to add
+ * new fix to T side.
+ */
+static int validate_ipl_side(void)
+{
+ if (ipl_side == FW_IPL_SIDE_PERM)
+ return 0;
+ return code_update_swap_side();
+}
+
+static int64_t fsp_opal_validate_flash(uint64_t buffer,
+ uint32_t *size, uint32_t *result)
+{
+ int64_t rc = 0;
+ int offset;
+
+ lock(&flash_lock);
+
+ rc = validate_candidate_image(buffer, *size, result);
+ /* Fill output buffer
+ *
+ * Format:
+ * MI<sp>current-T-image<sp>current-P-image<0x0A>
+ * MI<sp>new-T-image<sp>new-P-image<0x0A>
+ * ML<sp>current-T-image<sp>current-P-image<0x0A>
+ * ML<sp>new-T-image<sp>new-P-image<0x0A>
+ */
+ if (!rc && (*result != VALIDATE_FLASH_AUTH &&
+ *result != VALIDATE_INVALID_IMG)) {
+ /* Clear output buffer */
+ memset((void *)buffer, 0, VALIDATE_BUF_SIZE);
+
+ offset = validate_out_buf_mi_data((void *)buffer, 0, *result);
+ offset += validate_out_buf_ml_data((void *)buffer,
+ offset, *result);
+ *size = offset;
+ }
+
+ unlock(&flash_lock);
+ return rc;
+}
+
+/* Commit/Reject T side image */
+static int64_t fsp_opal_manage_flash(uint8_t op)
+{
+ uint32_t cmd;
+ int rc;
+
+ lock(&flash_lock);
+ rc = code_update_check_state();
+ unlock(&flash_lock);
+
+ if (rc != OPAL_SUCCESS)
+ return rc;
+
+ if (op != OPAL_REJECT_TMP_SIDE && op != OPAL_COMMIT_TMP_SIDE)
+ return OPAL_PARAMETER;
+
+ if ((op == OPAL_COMMIT_TMP_SIDE && ipl_side == FW_IPL_SIDE_PERM) ||
+ (op == OPAL_REJECT_TMP_SIDE && ipl_side == FW_IPL_SIDE_TEMP))
+ return OPAL_ACTIVE_SIDE_ERR;
+
+ if (op == OPAL_COMMIT_TMP_SIDE)
+ cmd = FSP_CMD_FLASH_NORMAL;
+ else
+ cmd = FSP_CMD_FLASH_REMOVE;
+
+ return code_update_commit(cmd);
+}
+
+static int fsp_flash_firmware(void)
+{
+ struct update_image_header *header;
+ struct lid_index_entry *idx_entry;
+ struct opal_sg_list *list;
+ struct opal_sg_entry *entry;
+ int rc, i;
+
+ lock(&flash_lock);
+
+ /* Make sure no outstanding LID read is in progress */
+ rc = code_update_check_state();
+ if (rc == OPAL_BUSY)
+ fsp_code_update_wait_vpd(false);
+
+ /* Get LID Index */
+ list = image_data;
+ if (!list)
+ goto out;
+ entry = &list->entry[0];
+ header = (struct update_image_header *)entry->data;
+ idx_entry = (void *)header + be16_to_cpu(header->lid_index_offset);
+
+ /* FIXME:
+ * At present we depend on FSP to validate CRC for
+ * individual LIDs. Calculate and validate individual
+ * LID CRC here.
+ */
+
+ if (validate_ipl_side() != 0)
+ goto out;
+
+ /* Set next IPL side */
+ if (code_update_set_ipl_side() != 0)
+ goto out;
+
+ /* Start code update process */
+ if (code_update_start() != 0)
+ goto out;
+
+ /*
+ * Delete T side LIDs before writing.
+ *
+ * Note:
+ * - Applicable for FWv >= 760.
+ * - Current Code Update design is to ignore
+ * any delete lid failure, and continue with
+ * the update.
+ */
+ rc = code_update_del_lid(DEL_UPD_SIDE_LIDS);
+
+ for (i = 0; i < be16_to_cpu(header->number_lids); i++) {
+ if (be32_to_cpu(idx_entry->size) > LID_MAX_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: "
+ "LID size 0x%x is > max LID size \n",
+ be32_to_cpu(idx_entry->size));
+
+ goto abort_update;
+ }
+
+ rc = get_lid_data(list, be32_to_cpu(idx_entry->size),
+ be32_to_cpu(idx_entry->offset));
+ if (rc)
+ goto abort_update;
+
+ rc = code_update_write_lid(be32_to_cpu(idx_entry->id),
+ be32_to_cpu(idx_entry->size));
+ if (rc)
+ goto abort_update;
+
+ /* Unmap TCE */
+ code_update_tce_unmap(PSI_DMA_CODE_UPD_SIZE);
+
+ /* Next LID index */
+ idx_entry = (void *)idx_entry + sizeof(struct lid_index_entry);
+ }
+
+ /* Code update completed */
+ rc = code_update_complete(FSP_CMD_FLASH_COMPLETE);
+
+ unlock(&flash_lock);
+ return rc;
+
+abort_update:
+ log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: LID update failed "
+ "Aborting codeupdate! rc:%d", rc);
+ rc = code_update_complete(FSP_CMD_FLASH_ABORT);
+out:
+ unlock(&flash_lock);
+ return -1;
+}
+
+static int64_t validate_sglist(struct opal_sg_list *list)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *prev_entry, *entry;
+ int length, num_entries, i;
+
+ prev_entry = NULL;
+ for (sg = list; sg; sg = sg->next) {
+ length = (sg->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return -1;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+ /* All entries must be aligned */
+ if (((uint64_t)entry->data) & 0xfff)
+ return OPAL_PARAMETER;
+
+ /* All non-terminal entries size must be aligned */
+ if (prev_entry && (prev_entry->length & 0xfff))
+ return OPAL_PARAMETER;
+
+ prev_entry = entry;
+ }
+ }
+ return OPAL_SUCCESS;
+}
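+
+/*
+ * A minimal sketch of the length decoding used above (illustrative
+ * only, nothing here calls it): the top byte of ->length carries
+ * SG_LIST_VERSION and the first 16 bytes of each chunk are the header,
+ * so the remaining payload divided by the entry size gives the number
+ * of entries in a chunk.
+ */
+static inline int sg_chunk_num_entries(const struct opal_sg_list *sg)
+{
+	uint64_t payload = (sg->length & ~(SG_LIST_VERSION << 56)) - 16;
+
+	return payload / sizeof(struct opal_sg_entry);
+}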
+
+static int64_t fsp_opal_update_flash(struct opal_sg_list *list)
+{
+ struct opal_sg_entry *entry;
+ int length, num_entries, result = 0, rc = OPAL_PARAMETER;
+
+ /* Ensure that the sg list honors our alignment requirements */
+ rc = validate_sglist(list);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_CU_SG_LIST),
+ "CUPD: sglist fails alignment requirements\n");
+ return rc;
+ }
+
+ lock(&flash_lock);
+ if (!list) { /* Cancel update request */
+ fsp_flash_term_hook = NULL;
+ image_data = NULL;
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+ length = (list->length & ~(SG_LIST_VERSION << 56)) - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ goto out;
+
+ /* Validate image header */
+ entry = &list->entry[0];
+ rc = validate_candidate_image((uint64_t)entry->data,
+ VALIDATE_BUF_SIZE, &result);
+ if (!rc && (result != VALIDATE_FLASH_AUTH &&
+ result != VALIDATE_INVALID_IMG)) {
+ image_data = list;
+ fsp_flash_term_hook = fsp_flash_firmware;
+ goto out;
+ }
+
+ /* Adjust return code */
+ if (result == VALIDATE_FLASH_AUTH)
+ rc = OPAL_FLASH_NO_AUTH;
+ else if (result == VALIDATE_INVALID_IMG)
+ rc = OPAL_INVALID_IMAGE;
+
+out:
+ unlock(&flash_lock);
+ return rc;
+}
+
+/*
+ * Code Update notifications
+ *
+ * Note: At present we just ACK these notifications.
+ *       We will need to reset cached VPD data if we are going to
+ *       support concurrent image maintenance in the future.
+ */
+static bool code_update_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ int rc;
+ uint32_t cmd;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_FLASH_CACHE:
+ cmd = FSP_CMD_FLASH_CACHE_RSP;
+ printf("CUPD: Update LID cache event [data = 0x%x]\n",
+ msg->data.words[0]);
+ break;
+ case FSP_CMD_FLASH_OUTC:
+ case FSP_CMD_FLASH_OUTR:
+ case FSP_CMD_FLASH_OUTS:
+ cmd = FSP_CMD_FLASH_OUT_RSP;
+ printf("CUPD: Out of band commit notify [Type = 0x%x]\n",
+ (msg->word1 >> 8) & 0xff);
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Unknown "
+ "notification [cmd = 0x%x]\n", cmd_sub_mod);
+ return false;
+ }
+
+ rc = fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ if (rc)
+ log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Failed to "
+			"queue code update notification response: %d\n", rc);
+
+ return true;
+}
+
+static struct fsp_client fsp_get_notify = {
+ .message = code_update_notify,
+};
+
+void fsp_code_update_init(void)
+{
+ if (!fsp_present()) {
+ flash_state = FLASH_STATE_ABSENT;
+ return;
+ }
+
+ /* OPAL interface */
+ opal_register(OPAL_FLASH_VALIDATE, fsp_opal_validate_flash, 3);
+ opal_register(OPAL_FLASH_MANAGE, fsp_opal_manage_flash, 1);
+ opal_register(OPAL_FLASH_UPDATE, fsp_opal_update_flash, 1);
+
+ /* register Code Update Class D3 */
+ fsp_register_client(&fsp_get_notify, FSP_MCLASS_CODE_UPDATE);
+
+ /* Flash hook */
+ fsp_flash_term_hook = NULL;
+
+ /* Fetch various code update related sys parameters */
+ get_ipl_side();
+ get_code_update_policy();
+ get_platform_hmc_managed();
+
+ /* Fetch common marker LID */
+ lid_data = memalign(TCE_PSIZE, MARKER_LID_SIZE);
+ if (!lid_data) {
+ log_simple_error(&e_info(OPAL_RC_CU_INIT),
+ "CUPD: Failed to allocate memory for marker LID\n");
+ flash_state = FLASH_STATE_ABSENT;
+ return;
+ }
+ fetch_com_marker_lid();
+}
diff --git a/hw/fsp/fsp-console.c b/hw/fsp/fsp-console.c
new file mode 100644
index 0000000..725edcc
--- /dev/null
+++ b/hw/fsp/fsp-console.c
@@ -0,0 +1,922 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Service Processor serial console handling code
+ */
+#include <skiboot.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <console.h>
+#include <opal.h>
+#include <timebase.h>
+#include <device.h>
+
+struct fsp_serbuf_hdr {
+ u16 partition_id;
+ u8 session_id;
+ u8 hmc_id;
+ u16 data_offset;
+ u16 last_valid;
+ u16 ovf_count;
+ u16 next_in;
+ u8 flags;
+ u8 reserved;
+ u16 next_out;
+ u8 data[];
+};
+#define SER_BUF_DATA_SIZE (0x10000 - sizeof(struct fsp_serbuf_hdr))
+
+struct fsp_serial {
+ bool available;
+ bool open;
+ bool has_part0;
+ bool has_part1;
+ bool log_port;
+ bool out_poke;
+ char loc_code[LOC_CODE_SIZE];
+ u16 rsrc_id;
+ struct fsp_serbuf_hdr *in_buf;
+ struct fsp_serbuf_hdr *out_buf;
+ struct fsp_msg *poke_msg;
+};
+
+#define SER_BUFFER_SIZE 0x00040000UL
+#define MAX_SERIAL 4
+
+static struct fsp_serial fsp_serials[MAX_SERIAL];
+static bool got_intf_query;
+static bool got_assoc_resp;
+static bool got_deassoc_resp;
+static struct lock fsp_con_lock = LOCK_UNLOCKED;
+static void* ser_buffer = NULL;
+
+static void fsp_console_reinit(void)
+{
+ int i;
+ void *base;
+
+	/* Initialize our data structure pointers & TCE maps */
+ base = ser_buffer;
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *ser = &fsp_serials[i];
+
+ ser->in_buf = base;
+ ser->out_buf = base + SER_BUFFER_SIZE/2;
+ base += SER_BUFFER_SIZE;
+ }
+ fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer,
+ 4 * PSI_DMA_SER0_SIZE);
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Reassociating HVSI console %d\n", i);
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (fs->rsrc_id << 16) | 1, i), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+static void fsp_close_consoles(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+
+ if (!fs->available)
+ continue;
+
+ if (fs->rsrc_id == 0xffff) /* Get clarity from benh */
+ continue;
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ fs->open = false;
+ fs->out_poke = false;
+ if (fs->poke_msg->state != fsp_msg_unused)
+ fsp_cancelmsg(fs->poke_msg);
+ fsp_freemsg(fs->poke_msg);
+ fs->poke_msg = NULL;
+ }
+ unlock(&fsp_con_lock);
+ }
+ printf("FSPCON: Closed consoles on account of FSP reset/reload\n");
+}
+
+static void fsp_pokemsg_reclaim(struct fsp_msg *msg)
+{
+ struct fsp_serial *fs = msg->user_data;
+
+ /*
+	 * The poke_msg might have been "detached" from the console
+	 * in fsp_close_vserial, so we need to check whether it's
+	 * current before touching the state; otherwise, just free it.
+ */
+ lock(&fsp_con_lock);
+ if (fs->open && fs->poke_msg == msg) {
+ if (fs->out_poke) {
+ fs->out_poke = false;
+ fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim);
+ } else
+ fs->poke_msg->state = fsp_msg_unused;
+ } else
+ fsp_freemsg(msg);
+ unlock(&fsp_con_lock);
+}
+
+/* Called with the fsp_con_lock held */
+static size_t fsp_write_vserial(struct fsp_serial *fs, const char *buf,
+ size_t len)
+{
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+ u16 old_nin = sb->next_in;
+ u16 space, chunk;
+
+ if (!fs->open)
+ return 0;
+
+ space = (sb->next_out + SER_BUF_DATA_SIZE - old_nin - 1)
+ % SER_BUF_DATA_SIZE;
+ if (space < len)
+ len = space;
+ if (!len)
+ return 0;
+
+ chunk = SER_BUF_DATA_SIZE - old_nin;
+ if (chunk > len)
+ chunk = len;
+ memcpy(&sb->data[old_nin], buf, chunk);
+ if (chunk < len)
+ memcpy(&sb->data[0], buf + chunk, len - chunk);
+ lwsync();
+ sb->next_in = (old_nin + len) % SER_BUF_DATA_SIZE;
+ sync();
+
+ if (sb->next_out == old_nin && fs->poke_msg) {
+ if (fs->poke_msg->state == fsp_msg_unused)
+ fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim);
+ else
+ fs->out_poke = true;
+ }
+#ifndef DISABLE_CON_PENDING_EVT
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT,
+ OPAL_EVENT_CONSOLE_OUTPUT);
+#endif
+ return len;
+}
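+
+/*
+ * Worked example of the space computation above (values made up):
+ * with SER_BUF_DATA_SIZE = 100, next_out = 10 and next_in = 30,
+ * space = (10 + 100 - 30 - 1) % 100 = 79. One slot is kept free so
+ * that next_in == next_out always means "empty" rather than "full".
+ */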
+
+#ifdef DVS_CONSOLE
+static int fsp_con_port = -1;
+static bool fsp_con_full;
+
+/*
+ * This is called by the code in console.c without the con_lock
+ * held. However it can be called as the result of any printf
+ * thus any other lock might be held including possibly the
+ * FSP lock
+ */
+static size_t fsp_con_write(const char *buf, size_t len)
+{
+ size_t written;
+
+ if (fsp_con_port < 0)
+ return 0;
+
+ lock(&fsp_con_lock);
+ written = fsp_write_vserial(&fsp_serials[fsp_con_port], buf, len);
+ fsp_con_full = (written < len);
+ unlock(&fsp_con_lock);
+
+ return written;
+}
+
+static struct con_ops fsp_con_ops = {
+ .write = fsp_con_write,
+};
+#endif /* DVS_CONSOLE */
+
+static void fsp_open_vserial(struct fsp_msg *msg)
+{
+ u16 part_id = msg->data.words[0] & 0xffff;
+ u16 sess_id = msg->data.words[1] & 0xffff;
+ u8 hmc_sess = msg->data.bytes[0];
+ u8 hmc_indx = msg->data.bytes[1];
+ u8 authority = msg->data.bytes[4];
+ u32 tce_in, tce_out;
+ struct fsp_serial *fs;
+
+ printf("FSPCON: Got VSerial Open\n");
+ printf(" part_id = 0x%04x\n", part_id);
+ printf(" sess_id = 0x%04x\n", sess_id);
+ printf(" hmc_sess = 0x%02x\n", hmc_sess);
+ printf(" hmc_indx = 0x%02x\n", hmc_indx);
+ printf(" authority = 0x%02x\n", authority);
+
+ if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_OPEN_VSERIAL | 0x2f, 0),
+ fsp_freemsg);
+ printf(" NOT AVAILABLE !\n");
+ return;
+ }
+
+ fs = &fsp_serials[sess_id];
+
+ /* Hack ! On blades, the console opened via the mm has partition 1
+ * while the debug DVS generally has partition 0 (though you can
+	 * really use whatever you want).
+	 * We don't want a DVS open/close to crap on the blademm console,
+	 * thus if a raw console gets an open with partition ID 1, we
+	 * set a flag so that the subsequent close of partition ID 0 is
+	 * ignored.
+ */
+ if (fs->rsrc_id == 0xffff) {
+ if (part_id == 0)
+ fs->has_part0 = true;
+ if (part_id == 1)
+ fs->has_part1 = true;
+ }
+
+ tce_in = PSI_DMA_SER0_BASE + PSI_DMA_SER0_SIZE * sess_id;
+ tce_out = tce_in + SER_BUFFER_SIZE/2;
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ printf(" already open, skipping init !\n");
+ unlock(&fsp_con_lock);
+ goto already_open;
+ }
+
+ fs->open = true;
+
+ fs->poke_msg = fsp_mkmsg(FSP_CMD_VSERIAL_OUT, 2,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff);
+ fs->poke_msg->user_data = fs;
+
+ fs->in_buf->partition_id = fs->out_buf->partition_id = part_id;
+ fs->in_buf->session_id = fs->out_buf->session_id = sess_id;
+ fs->in_buf->hmc_id = fs->out_buf->hmc_id = hmc_indx;
+ fs->in_buf->data_offset = fs->out_buf->data_offset =
+ sizeof(struct fsp_serbuf_hdr);
+ fs->in_buf->last_valid = fs->out_buf->last_valid =
+ SER_BUF_DATA_SIZE - 1;
+ fs->in_buf->ovf_count = fs->out_buf->ovf_count = 0;
+ fs->in_buf->next_in = fs->out_buf->next_in = 0;
+ fs->in_buf->flags = fs->out_buf->flags = 0;
+ fs->in_buf->reserved = fs->out_buf->reserved = 0;
+ fs->in_buf->next_out = fs->out_buf->next_out = 0;
+ unlock(&fsp_con_lock);
+
+ already_open:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_OPEN_VSERIAL, 6,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff,
+ 0, tce_in, 0, tce_out), fsp_freemsg);
+
+#ifdef DVS_CONSOLE
+ printf(" log_port = %d\n", fs->log_port);
+ if (fs->log_port) {
+ fsp_con_port = sess_id;
+ sync();
+ /*
+ * We mark the FSP lock as being in the console
+		 * path. We do that only once and never unmark it
+		 * (there is really not much point).
+ */
+ fsp_used_by_console();
+ fsp_con_lock.in_con_path = true;
+ set_console(&fsp_con_ops);
+ }
+#endif
+}
+
+static void fsp_close_vserial(struct fsp_msg *msg)
+{
+ u16 part_id = msg->data.words[0] & 0xffff;
+ u16 sess_id = msg->data.words[1] & 0xffff;
+ u8 hmc_sess = msg->data.bytes[0];
+ u8 hmc_indx = msg->data.bytes[1];
+ u8 authority = msg->data.bytes[4];
+ struct fsp_serial *fs;
+
+ printf("FSPCON: Got VSerial Close\n");
+ printf(" part_id = 0x%04x\n", part_id);
+ printf(" sess_id = 0x%04x\n", sess_id);
+ printf(" hmc_sess = 0x%02x\n", hmc_sess);
+ printf(" hmc_indx = 0x%02x\n", hmc_indx);
+ printf(" authority = 0x%02x\n", authority);
+
+ if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) {
+ printf(" NOT AVAILABLE !\n");
+ goto skip_close;
+ }
+
+ fs = &fsp_serials[sess_id];
+
+ /* See "HACK" comment in open */
+ if (fs->rsrc_id == 0xffff) {
+ if (part_id == 0)
+ fs->has_part0 = false;
+ if (part_id == 1)
+ fs->has_part1 = false;
+ if (fs->has_part0 || fs->has_part1) {
+ printf(" skipping close !\n");
+ goto skip_close;
+ }
+ }
+
+#ifdef DVS_CONSOLE
+ if (fs->log_port) {
+ fsp_con_port = -1;
+ set_console(NULL);
+ }
+#endif
+
+ lock(&fsp_con_lock);
+ if (fs->open) {
+ fs->open = false;
+ fs->out_poke = false;
+ if (fs->poke_msg && fs->poke_msg->state == fsp_msg_unused) {
+ fsp_freemsg(fs->poke_msg);
+ fs->poke_msg = NULL;
+ }
+ }
+ unlock(&fsp_con_lock);
+ skip_close:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_CLOSE_VSERIAL, 2,
+ msg->data.words[0],
+ msg->data.words[1] & 0xffff),
+ fsp_freemsg);
+}
+
+static bool fsp_con_msg_hmc(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ /* Associate response */
+ if ((cmd_sub_mod >> 8) == 0xe08a) {
+ printf("FSPCON: Got associate response, status 0x%02x\n",
+ cmd_sub_mod & 0xff);
+ got_assoc_resp = true;
+ return true;
+ }
+ if ((cmd_sub_mod >> 8) == 0xe08b) {
+		printf("FSPCON: Got unassociate response, status 0x%02x\n",
+ cmd_sub_mod & 0xff);
+ got_deassoc_resp = true;
+ return true;
+ }
+ switch(cmd_sub_mod) {
+ case FSP_CMD_OPEN_VSERIAL:
+ fsp_open_vserial(msg);
+ return true;
+ case FSP_CMD_CLOSE_VSERIAL:
+ fsp_close_vserial(msg);
+ return true;
+ case FSP_CMD_HMC_INTF_QUERY:
+ printf("FSPCON: Got HMC interface query\n");
+
+		/* Keep this synchronous due to the FSP's fragile ordering
+		 * of the boot sequence
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_HMC_INTF_QUERY, 1,
+ msg->data.words[0] & 0x00ffffff), true);
+ got_intf_query = true;
+ return true;
+ }
+ return false;
+}
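+
+/*
+ * Note on the decoding above: the low byte of cmd_sub_mod carries the
+ * FSP completion status (the value printed), which is why the
+ * associate/unassociate responses are matched on cmd_sub_mod >> 8
+ * rather than on the full value.
+ */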
+
+static bool fsp_con_msg_vt(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u16 sess_id = msg->data.words[1] & 0xffff;
+
+ if (cmd_sub_mod == FSP_CMD_VSERIAL_IN && sess_id < MAX_SERIAL) {
+ struct fsp_serial *fs = &fsp_serials[sess_id];
+
+ if (!fs->open)
+ return true;
+
+ /* FSP is signaling some incoming data. We take the console
+ * lock to avoid racing with a simultaneous read, though we
+		 * might want to consider simplifying all that locking into
+ * one single lock that covers the console and the pending
+ * events.
+ */
+ lock(&fsp_con_lock);
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT,
+ OPAL_EVENT_CONSOLE_INPUT);
+ unlock(&fsp_con_lock);
+ }
+ return true;
+}
+
+static bool fsp_con_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ fsp_close_consoles();
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ fsp_console_reinit();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_con_client_hmc = {
+ .message = fsp_con_msg_hmc,
+};
+
+static struct fsp_client fsp_con_client_vt = {
+ .message = fsp_con_msg_vt,
+};
+
+static struct fsp_client fsp_con_client_rr = {
+ .message = fsp_con_msg_rr,
+};
+
+static void fsp_serial_add(int index, u16 rsrc_id, const char *loc_code,
+ bool log_port)
+{
+ struct fsp_serial *ser;
+
+ lock(&fsp_con_lock);
+ ser = &fsp_serials[index];
+
+ if (ser->available) {
+ unlock(&fsp_con_lock);
+ return;
+ }
+
+ ser->rsrc_id = rsrc_id;
+ strncpy(ser->loc_code, loc_code, LOC_CODE_SIZE);
+ ser->available = true;
+ ser->log_port = log_port;
+ unlock(&fsp_con_lock);
+
+ /* DVS doesn't have that */
+ if (rsrc_id != 0xffff) {
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (rsrc_id << 16) | 1, index), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+void fsp_console_preinit(void)
+{
+ int i;
+ void *base;
+
+ if (!fsp_present())
+ return;
+
+ ser_buffer = memalign(TCE_PSIZE, SER_BUFFER_SIZE * MAX_SERIAL);
+
+	/* Initialize our data structure pointers & TCE maps */
+ base = ser_buffer;
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *ser = &fsp_serials[i];
+
+ ser->in_buf = base;
+ ser->out_buf = base + SER_BUFFER_SIZE/2;
+ base += SER_BUFFER_SIZE;
+ }
+ fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer,
+ 4 * PSI_DMA_SER0_SIZE);
+
+ /* Register for class E0 and E1 */
+ fsp_register_client(&fsp_con_client_hmc, FSP_MCLASS_HMC_INTFMSG);
+ fsp_register_client(&fsp_con_client_vt, FSP_MCLASS_HMC_VT);
+ fsp_register_client(&fsp_con_client_rr, FSP_MCLASS_RR_EVENT);
+
+	/* Add DVS ports. We currently have sessions 0 and 3: 0 is for
+	 * OS use, 3 is our debug port. We need to add those before
+ * we complete the OPL or we'll potentially miss the
+ * console setup on Firebird blades.
+ */
+ fsp_serial_add(0, 0xffff, "DVS_OS", false);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0001);
+ fsp_serial_add(3, 0xffff, "DVS_FW", true);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0002);
+}
+
+static int64_t fsp_console_write(int64_t term_number, int64_t *length,
+ const uint8_t *buffer)
+{
+ struct fsp_serial *fs;
+ size_t written, requested;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ /* Clamp to a reasonable size */
+ requested = *length;
+ if (requested > 0x1000)
+ requested = 0x1000;
+ written = fsp_write_vserial(fs, buffer, requested);
+
+#ifdef OPAL_DEBUG_CONSOLE_IO
+ printf("OPAL: console write req=%ld written=%ld ni=%d no=%d\n",
+ requested, written, fs->out_buf->next_in, fs->out_buf->next_out);
+ printf(" %02x %02x %02x %02x "
+ "%02x \'%c\' %02x \'%c\' %02x \'%c\'.%02x \'%c\'..\n",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[4], buffer[5], buffer[5],
+ buffer[6], buffer[6], buffer[7], buffer[7]);
+#endif /* OPAL_DEBUG_CONSOLE_IO */
+
+ *length = written;
+ unlock(&fsp_con_lock);
+
+ return written ? OPAL_SUCCESS : OPAL_BUSY_EVENT;
+}
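+
+/*
+ * Hypothetical caller-side sketch (not part of this file): since
+ * *length is updated to the number of bytes actually consumed, an OS
+ * is expected to handle partial writes, along the lines of:
+ *
+ *	int64_t len = count;
+ *	int64_t rc = opal_console_write(term, &len, buf);
+ *	if (rc == OPAL_SUCCESS && len < count)
+ *		;	// retry later with buf + len, count - len
+ */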
+
+static int64_t fsp_console_write_buffer_space(int64_t term_number,
+ int64_t *length)
+{
+ struct fsp_serial *fs;
+ struct fsp_serbuf_hdr *sb;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ sb = fs->out_buf;
+ *length = (sb->next_out + SER_BUF_DATA_SIZE - sb->next_in - 1)
+ % SER_BUF_DATA_SIZE;
+ unlock(&fsp_con_lock);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t fsp_console_read(int64_t term_number, int64_t *length,
+ uint8_t *buffer __unused)
+{
+ struct fsp_serial *fs;
+ struct fsp_serbuf_hdr *sb;
+ bool pending = false;
+ uint32_t old_nin, n, i, chunk, req = *length;
+
+ if (term_number < 0 || term_number >= MAX_SERIAL)
+ return OPAL_PARAMETER;
+ fs = &fsp_serials[term_number];
+ if (!fs->available || fs->log_port)
+ return OPAL_PARAMETER;
+ lock(&fsp_con_lock);
+ if (!fs->open) {
+ unlock(&fsp_con_lock);
+ return OPAL_CLOSED;
+ }
+ sb = fs->in_buf;
+ old_nin = sb->next_in;
+ lwsync();
+ n = (old_nin + SER_BUF_DATA_SIZE - sb->next_out)
+ % SER_BUF_DATA_SIZE;
+ if (n > req) {
+ pending = true;
+ n = req;
+ }
+ *length = n;
+
+ chunk = SER_BUF_DATA_SIZE - sb->next_out;
+ if (chunk > n)
+ chunk = n;
+ memcpy(buffer, &sb->data[sb->next_out], chunk);
+ if (chunk < n)
+ memcpy(buffer + chunk, &sb->data[0], n - chunk);
+ sb->next_out = (sb->next_out + n) % SER_BUF_DATA_SIZE;
+
+#ifdef OPAL_DEBUG_CONSOLE_IO
+ printf("OPAL: console read req=%d read=%d ni=%d no=%d\n",
+ req, n, sb->next_in, sb->next_out);
+ printf(" %02x %02x %02x %02x %02x %02x %02x %02x ...\n",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6], buffer[7]);
+#endif /* OPAL_DEBUG_CONSOLE_IO */
+
+ /* Might clear the input pending flag */
+ for (i = 0; i < MAX_SERIAL && !pending; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->in_buf;
+
+ if (fs->log_port || !fs->open)
+ continue;
+ if (sb->next_out != sb->next_in)
+ pending = true;
+ }
+ if (!pending)
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0);
+
+ unlock(&fsp_con_lock);
+
+ return OPAL_SUCCESS;
+}
+
+void fsp_console_poll(void *data __unused)
+{
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ static int debug;
+#endif
+
+ /*
+	 * We don't get messages for the out buffer being consumed, so we
+	 * need to poll. We also defer sending of poke messages from
+	 * the sapphire console to avoid a locking nightmare when being
+	 * called from printf() deep inside an existing lock nest.
+ */
+ if (fsp_con_full ||
+ (opal_pending_events & OPAL_EVENT_CONSOLE_OUTPUT)) {
+ unsigned int i;
+ bool pending = false;
+
+ /* We take the console lock. This is somewhat inefficient
+ * but it guarantees we aren't racing with a write, and
+		 * thus won't clear an event improperly.
+ */
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL && !pending; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+
+ if (!fs->open)
+ continue;
+ if (sb->next_out == sb->next_in)
+ continue;
+ if (fs->log_port)
+ __flush_console();
+ else {
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ if (debug < 5) {
+ printf("OPAL: %d still pending"
+ " ni=%d no=%d\n",
+ i, sb->next_in, sb->next_out);
+ debug++;
+ }
+#endif /* OPAL_DEBUG_CONSOLE_POLL */
+ pending = true;
+ }
+ }
+ if (!pending) {
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT, 0);
+#ifdef OPAL_DEBUG_CONSOLE_POLL
+ debug = 0;
+#endif
+ }
+ unlock(&fsp_con_lock);
+ }
+}
+
+void fsp_console_init(void)
+{
+ struct dt_node *serials, *ser;
+ int i;
+
+ if (!fsp_present())
+ return;
+
+ opal_register(OPAL_CONSOLE_READ, fsp_console_read, 3);
+ opal_register(OPAL_CONSOLE_WRITE_BUFFER_SPACE,
+ fsp_console_write_buffer_space, 2);
+ opal_register(OPAL_CONSOLE_WRITE, fsp_console_write, 3);
+
+	/* Wait until we have received the interface query before moving on */
+ while (!got_intf_query)
+ fsp_poll();
+
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0000);
+
+ /* Register poller */
+ opal_add_poller(fsp_console_poll, NULL);
+
+ /* Parse serial port data */
+ serials = dt_find_by_path(dt_root, "ipl-params/fsp-serial");
+ if (!serials) {
+ prerror("FSPCON: No FSP serial ports in device-tree\n");
+ return;
+ }
+
+ i = 1;
+ dt_for_each_child(serials, ser) {
+ u32 rsrc_id = dt_prop_get_u32(ser, "reg");
+ const void *lc = dt_prop_get(ser, "ibm,loc-code");
+
+ printf("FSPCON: Serial %d rsrc: %04x loc: %s\n",
+ i, rsrc_id, (const char *)lc);
+ fsp_serial_add(i++, rsrc_id, lc, false);
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0010 + i);
+ }
+
+ op_display(OP_LOG, OP_MOD_FSPCON, 0x0005);
+}
+
+static void flush_all_input(void)
+{
+ unsigned int i;
+
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->in_buf;
+
+ if (fs->log_port)
+ continue;
+
+ sb->next_out = sb->next_in;
+ }
+ unlock(&fsp_con_lock);
+}
+
+static bool send_all_hvsi_close(void)
+{
+ unsigned int i;
+ bool has_hvsi = false;
+ static const uint8_t close_packet[] = { 0xfe, 6, 0, 1, 0, 3 };
+
+ lock(&fsp_con_lock);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct fsp_serbuf_hdr *sb = fs->out_buf;
+ unsigned int space, timeout = 10;
+
+ if (fs->log_port)
+ continue;
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ has_hvsi = true;
+
+ /* Do we have room ? Wait a bit if not */
+ while(timeout--) {
+ space = (sb->next_out + SER_BUF_DATA_SIZE -
+ sb->next_in - 1) % SER_BUF_DATA_SIZE;
+ if (space >= 6)
+ break;
+ time_wait_ms(500);
+ }
+ fsp_write_vserial(fs, close_packet, 6);
+ }
+ unlock(&fsp_con_lock);
+
+ return has_hvsi;
+}
+
+static void reopen_all_hvsi(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Deassociating HVSI console %d\n", i);
+ got_deassoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_UNASSOC_SERIAL, 1,
+ (i << 16) | 1), true);
+ /* XXX add timeout ? */
+ while(!got_deassoc_resp)
+ fsp_poll();
+ }
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ if (fs->rsrc_id == 0xffff)
+ continue;
+ printf("FSP: Reassociating HVSI console %d\n", i);
+ got_assoc_resp = false;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2,
+ (fs->rsrc_id << 16) | 1, i), true);
+ /* XXX add timeout ? */
+ while(!got_assoc_resp)
+ fsp_poll();
+ }
+}
+
+void fsp_console_reset(void)
+{
+ printf("FSP: Console reset !\n");
+
+ /* This is called on a fast-reset. To work around issues with HVSI
+ * initial negotiation, before we reboot the kernel, we flush all
+ * input and send an HVSI close packet.
+ */
+ flush_all_input();
+
+ /* Returns false if there is no HVSI console */
+ if (!send_all_hvsi_close())
+ return;
+
+ time_wait_ms(500);
+
+ flush_all_input();
+
+ reopen_all_hvsi();
+}
+
+void fsp_console_add_nodes(void)
+{
+ unsigned int i;
+ struct dt_node *consoles;
+
+ consoles = dt_new(opal_node, "consoles");
+ dt_add_property_cells(consoles, "#address-cells", 1);
+ dt_add_property_cells(consoles, "#size-cells", 0);
+ for (i = 0; i < MAX_SERIAL; i++) {
+ struct fsp_serial *fs = &fsp_serials[i];
+ struct dt_node *fs_node;
+ char name[32];
+
+ if (fs->log_port || !fs->available)
+ continue;
+
+ snprintf(name, sizeof(name), "serial@%d", i);
+ fs_node = dt_new(consoles, name);
+ if (fs->rsrc_id == 0xffff)
+ dt_add_property_string(fs_node, "compatible",
+ "ibm,opal-console-raw");
+ else
+ dt_add_property_string(fs_node, "compatible",
+ "ibm,opal-console-hvsi");
+ dt_add_property_cells(fs_node,
+ "#write-buffer-size", SER_BUF_DATA_SIZE);
+ dt_add_property_cells(fs_node, "reg", i);
+ dt_add_property_string(fs_node, "device_type", "serial");
+ }
+}
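+
+/*
+ * The resulting device tree looks roughly like this (HVSI console
+ * shown; a raw console differs only in its "compatible" value):
+ *
+ *	/ibm,opal/consoles/serial@1 {
+ *		compatible = "ibm,opal-console-hvsi";
+ *		#write-buffer-size = <SER_BUF_DATA_SIZE>;
+ *		reg = <1>;
+ *		device_type = "serial";
+ *	};
+ */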
+
+void fsp_console_select_stdout(void)
+{
+ struct dt_node *iplp;
+ u32 ipl_mode = 0;
+
+ if (!fsp_present())
+ return;
+
+ /*
+ * We hijack the "os-ipl-mode" setting in iplparams to select
+	 * our output console. This is the "i5/OS partition mode boot"
+ * setting in ASMI converted to an integer: 0=A, 1=B, ...
+ */
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ ipl_mode = dt_prop_get_u32_def(iplp, "os-ipl-mode", 0);
+
+ /*
+	 * Now, if ipl_mode is 1 or 2, we set the corresponding serial
+	 * port, if it exists (i.e., is opened), as the default console.
+	 *
+	 * In any other case, we set the default console to serial@0,
+	 * which is DVS or IPMI.
+ */
+ if (ipl_mode == 1 && fsp_serials[1].open) {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@1");
+ printf("FSPCON: default console 1\n");
+ } else if (ipl_mode == 2 && fsp_serials[2].open) {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@2");
+ printf("FSPCON: default console 2\n");
+ } else {
+ dt_add_property_string(dt_chosen, "linux,stdout-path",
+ "/ibm,opal/consoles/serial@0");
+ printf("FSPCON: default console 0\n");
+ }
+}
+
diff --git a/hw/fsp/fsp-diag.c b/hw/fsp/fsp-diag.c
new file mode 100644
index 0000000..5f588af
--- /dev/null
+++ b/hw/fsp/fsp-diag.c
@@ -0,0 +1,58 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Code for handling FSP_MCLASS_DIAG messages (cmd 0xee).
+ * Receiving a high level ack timeout is likely indicative of a firmware bug.
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <processor.h>
+#include <timebase.h>
+#include <opal.h>
+#include <fsp-sysparam.h>
+
+static bool fsp_diag_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ if (cmd_sub_mod == FSP_RSP_DIAG_LINK_ERROR) {
+ printf("FIXME: Unhandled FSP_MCLASS_DIAG Link Error Report\n");
+ return false;
+ }
+
+ if (cmd_sub_mod != FSP_RSP_DIAG_ACK_TIMEOUT) {
+ printf("BUG: Unhandled subcommand: 0x%x (New FSP spec?)\n",
+ cmd_sub_mod);
+ return false;
+ }
+
+ printf("BUG: High Level ACK timeout (FSP_MCLASS_DIAG) for 0x%x\n",
+ msg->data.words[0] & 0xffff0000);
+
+ return true;
+}
+
+static struct fsp_client fsp_diag = {
+ .message = fsp_diag_msg,
+};
+
+/* This is called at boot time */
+void fsp_init_diag(void)
+{
+ /* Register for the diag event */
+ fsp_register_client(&fsp_diag, FSP_MCLASS_DIAG);
+}
diff --git a/hw/fsp/fsp-dump.c b/hw/fsp/fsp-dump.c
new file mode 100644
index 0000000..be1aa7c
--- /dev/null
+++ b/hw/fsp/fsp-dump.c
@@ -0,0 +1,917 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * Dump support:
+ * We get dump notifications from different sources:
+ *   - During system initialization via HDAT
+ *   - During FSP reset/reload (FipS dump)
+ *   - Dump available notification MBOX command (0xCE, 0x78, 0x00)
+ *
+ * To avoid complications, we keep the dumps in a list and fetch
+ * them serially.
+ *
+ * Dump retrieval process:
+ * - Once we get notification from FSP we enqueue the dump ID and notify
+ * Linux via OPAL event notification.
+ * - Linux reads dump info and allocates required memory to fetch the dump
+ * and makes dump read call.
+ * - Sapphire fetches dump data from FSP.
+ * - Linux writes dump to disk and sends acknowledgement.
+ * - Sapphire acknowledges FSP.
+ */
+
+#include <fsp.h>
+#include <psi.h>
+#include <lock.h>
+#include <device.h>
+#include <skiboot.h>
+#include <fsp-elog.h>
+
+/*
+ * Max outstanding dumps to retrieve
+ *
+ * Note:
+ *   Dumps are serialized: we don't get a notification for a second
+ *   dump of a given type until we acknowledge the first one. But we
+ *   may get a notification for a different dump type, and our dump
+ *   retrieval code is serialized. Hence we use a list to keep
+ *   track of outstanding dumps to be retrieved.
+ */
+#define MAX_DUMP_RECORD 0x04
+
+/* Max retry */
+#define FIPS_DUMP_MAX_RETRY 0x03
+
+/* Dump type */
+#define DUMP_TYPE_FSP 0x01
+#define DUMP_TYPE_SYS 0x02
+#define DUMP_TYPE_SMA 0x03
+
+/* Dump fetch size */
+#define DUMP_FETCH_SIZE_FSP 0x500000
+#define DUMP_FETCH_SIZE_SYS 0x400000
+#define DUMP_FETCH_SIZE_RES 0x200000
+
+/* Params for Fips dump */
+#define FSP_DUMP_TOOL_TYPE "SYS "
+#define FSP_DUMP_CLIENT_ID "SAPPHIRE_CLIENT"
+
+enum dump_state {
+ DUMP_STATE_ABSENT, /* No FSP dump */
+ DUMP_STATE_NONE, /* No dump to retrieve */
+ DUMP_STATE_NOTIFY, /* Notified Linux */
+ DUMP_STATE_FETCHING, /* Dump retrieval is in progress */
+ DUMP_STATE_FETCH, /* Dump retrieve complete */
+ DUMP_STATE_PARTIAL, /* Partial read */
+ DUMP_STATE_ABORTING, /* Aborting due to kexec */
+};
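+
+/*
+ * Typical life cycle of a dump (illustrative):
+ *
+ *   NONE     -> NOTIFY    dump queued, Linux notified
+ *   NOTIFY   -> FETCHING  Linux called OPAL_DUMP_READ
+ *   FETCHING -> FETCH     retrieval completed (or PARTIAL)
+ *   any      -> NONE      Linux ACKed the dump
+ *   FETCHING -> ABORTING  kexec or FSP reset while fetching
+ */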
+
+/* Pending dump list */
+struct dump_record {
+ uint8_t type;
+ uint32_t id;
+ uint32_t size;
+ struct list_node link;
+};
+
+/* List definitions */
+static LIST_HEAD(dump_pending);
+static LIST_HEAD(dump_free);
+
+/* Dump retrieve state */
+static enum dump_state dump_state = DUMP_STATE_NONE;
+
+/* Dump buffer SG list */
+static struct opal_sg_list *dump_data;
+static struct dump_record *dump_entry;
+static int64_t dump_offset;
+static size_t fetch_remain;
+
+/* FipS dump retry count */
+static int retry_cnt;
+
+/* Protect list and dump retrieve state */
+static struct lock dump_lock = LOCK_UNLOCKED;
+
+/* Forward declaration */
+static int64_t fsp_opal_dump_init(uint8_t dump_type);
+static int64_t fsp_dump_read(void);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_ACK, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+/*
+ * Helper functions
+ */
+static inline void update_dump_state(enum dump_state state)
+{
+ dump_state = state;
+}
+
+static int64_t check_dump_state(void)
+{
+ switch (dump_state) {
+ case DUMP_STATE_ABSENT:
+ return OPAL_HARDWARE;
+ case DUMP_STATE_NONE:
+ case DUMP_STATE_NOTIFY:
+ /* During dump fetch, notify is wrong state */
+ return OPAL_WRONG_STATE;
+ case DUMP_STATE_FETCHING:
+ case DUMP_STATE_ABORTING:
+ return OPAL_BUSY_EVENT;
+ case DUMP_STATE_FETCH:
+ return OPAL_SUCCESS;
+ case DUMP_STATE_PARTIAL:
+ return OPAL_PARTIAL;
+ }
+ return OPAL_SUCCESS;
+}
+
+static inline void dump_tce_map(uint32_t tce_offset,
+ void *buffer, uint32_t size)
+{
+ uint32_t tlen = ALIGN_UP(size, TCE_PSIZE);
+ fsp_tce_map(PSI_DMA_DUMP_DATA + tce_offset, buffer, tlen);
+}
+
+static inline void dump_tce_unmap(uint32_t size)
+{
+ fsp_tce_unmap(PSI_DMA_DUMP_DATA, size);
+}
+
+/*
+ * Returns Data set ID for the given dump type
+ */
+static inline uint16_t get_dump_data_set_id(uint8_t type)
+{
+ switch (type) {
+ case DUMP_TYPE_FSP:
+ return FSP_DATASET_SP_DUMP;
+ case DUMP_TYPE_SYS:
+ return FSP_DATASET_HW_DUMP;
+ default:
+ break;
+ }
+ return OPAL_INTERNAL_ERROR;
+}
+
+/*
+ * Returns max data we can fetch from FSP fetch data call
+ */
+static inline int64_t get_dump_fetch_max_size(uint8_t type)
+{
+ switch (type) {
+ case DUMP_TYPE_FSP:
+ return DUMP_FETCH_SIZE_FSP;
+ case DUMP_TYPE_SYS:
+ return DUMP_FETCH_SIZE_SYS;
+ default:
+ break;
+ }
+ return OPAL_INTERNAL_ERROR;
+}
+
+/*
+ * Get dump record from pending list
+ */
+static inline struct dump_record *get_dump_rec_from_list(uint32_t id)
+{
+ struct dump_record *record;
+
+ list_for_each(&dump_pending, record, link) {
+ if (record->id == id)
+ return record;
+ }
+ return NULL;
+}
+
+/*
+ * New dump available notification to Linux
+ */
+static void update_opal_dump_notify(void)
+{
+ /*
+	 * Wait until the current dump retrieval completes
+ * before notifying again.
+ */
+ if (dump_state != DUMP_STATE_NONE)
+ return;
+
+	/* More dumps to retrieve */
+ if (!list_empty(&dump_pending)) {
+ update_dump_state(DUMP_STATE_NOTIFY);
+ opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL,
+ OPAL_EVENT_DUMP_AVAIL);
+ }
+}
+
+static int64_t remove_dump_id_from_list(uint32_t dump_id)
+{
+ struct dump_record *record, *nxt_record;
+ int rc = OPAL_SUCCESS;
+ bool found = false;
+
+ /* Remove record from pending list */
+ list_for_each_safe(&dump_pending, record, nxt_record, link) {
+ if (record->id != dump_id)
+ continue;
+
+ found = true;
+ list_del(&record->link);
+ list_add(&dump_free, &record->link);
+ break;
+ }
+
+ /*
+	 * Call update_opal_dump_notify even if we failed to remove
+	 * the ID, so that we can resend the notification for the
+	 * same dump ID to Linux.
+ */
+ if (!found) { /* List corrupted? */
+ log_simple_error(&e_info(OPAL_RC_DUMP_LIST),
+ "DUMP: ID 0x%x not found in list!\n",
+ dump_id);
+ rc = OPAL_PARAMETER;
+ }
+
+ /* Update state */
+ update_dump_state(DUMP_STATE_NONE);
+ /* Notify next available dump to retrieve */
+ update_opal_dump_notify();
+
+ return rc;
+}
+
+static int64_t add_dump_id_to_list(uint8_t dump_type,
+ uint32_t dump_id, uint32_t dump_size)
+{
+ struct dump_record *record;
+ int rc = OPAL_SUCCESS;
+
+ lock(&dump_lock);
+
+ rc = check_dump_state();
+ if (rc == OPAL_HARDWARE)
+ goto out;
+
+ /* List is full ? */
+ if (list_empty(&dump_free)) {
+ printf("DUMP: Dump ID 0x%x is not queued.\n", dump_id);
+ rc = OPAL_RESOURCE;
+ goto out;
+ }
+
+ /* Already queued? */
+ record = get_dump_rec_from_list(dump_id);
+ if (record) {
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ /* Add to list */
+ record = list_pop(&dump_free, struct dump_record, link);
+ record->type = dump_type;
+ record->id = dump_id;
+ record->size = dump_size;
+ list_add_tail(&dump_pending, &record->link);
+
+ /* OPAL notification */
+ update_opal_dump_notify();
+ rc = OPAL_SUCCESS;
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static void dump_init_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ printf("DUMP: FipS dump init status = 0x%x\n", status);
+ fsp_freemsg(msg);
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS:
+ printf("DUMP: Initiated FipS dump.\n");
+ break;
+ case FSP_STATUS_BUSY: /* Retry, if FSP is busy */
+ if (retry_cnt++ < FIPS_DUMP_MAX_RETRY)
+ if (fsp_opal_dump_init(DUMP_TYPE_FSP) == OPAL_SUCCESS)
+ return;
+ break;
+ default:
+ break;
+ }
+ /* Reset max retry count */
+ retry_cnt = 0;
+}
+
+/*
+ * Initiate new FipS dump
+ */
+static int64_t fsp_opal_dump_init(uint8_t dump_type)
+{
+ struct fsp_msg *msg;
+ int rc = OPAL_SUCCESS;
+ uint32_t *tool_type = (void *)FSP_DUMP_TOOL_TYPE;
+ uint32_t *client_id = (void *)FSP_DUMP_CLIENT_ID;
+
+ /* Only FipS dump generate request is supported */
+ if (dump_type != DUMP_TYPE_FSP)
+ return OPAL_PARAMETER;
+
+ msg = fsp_mkmsg(FSP_CMD_FSP_DUMP_INIT, 6, *tool_type,
+ sizeof(FSP_DUMP_CLIENT_ID), *client_id,
+ *(client_id + 1), *(client_id + 2), *(client_id + 3));
+
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Message allocation failed.\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, dump_init_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Failed to queue FipS dump init request.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+
+ return rc;
+}
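+
+/*
+ * Note on the string-to-word casts above: the FSP mbox payload is a
+ * sequence of 32-bit words, so the 4-byte tool type is passed as one
+ * word and the 16-byte client ID as four consecutive words. On this
+ * big-endian platform *tool_type is 0x53595320 ('S','Y','S',' ').
+ */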
+
+/*
+ * OPAL interface to send dump information to Linux.
+ */
+static int64_t fsp_opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size,
+ uint32_t *dump_type)
+{
+ struct dump_record *record;
+ int rc = OPAL_SUCCESS;
+
+ lock(&dump_lock);
+
+ /* Clear notification */
+ opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL, 0);
+
+ record = list_top(&dump_pending, struct dump_record, link);
+ if (!record) { /* List corrupted? */
+ update_dump_state(DUMP_STATE_NONE);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+ *dump_id = record->id;
+ *dump_size = record->size;
+ *dump_type = record->type;
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static int64_t fsp_opal_dump_info(uint32_t *dump_id, uint32_t *dump_size)
+{
+ uint32_t dump_type;
+ return fsp_opal_dump_info2(dump_id, dump_size, &dump_type);
+}
+
+static int64_t validate_dump_sglist(struct opal_sg_list *list,
+ int64_t *size)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *prev_entry, *entry;
+ int length, num_entries, i;
+
+ prev_entry = NULL;
+ *size = 0;
+ for (sg = list; sg; sg = sg->next) {
+ length = sg->length - 16;
+ num_entries = length / sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return OPAL_PARAMETER;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+ *size += entry->length;
+
+ /* All entries must be aligned */
+ if (((uint64_t)entry->data) & 0xfff)
+ return OPAL_PARAMETER;
+
+ /* All non-terminal entries size must be aligned */
+ if (prev_entry && (prev_entry->length & 0xfff))
+ return OPAL_PARAMETER;
+
+ prev_entry = entry;
+ }
+ }
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Map dump buffer to TCE buffer
+ */
+static int64_t map_dump_buffer(void)
+{
+ struct opal_sg_list *sg;
+ struct opal_sg_entry *entry;
+ int64_t fetch_max;
+ int length, num_entries, i;
+ int buf_off, fetch_off, tce_off, sg_off;
+ bool last = false;
+
+ /* FSP fetch max size */
+ fetch_max = get_dump_fetch_max_size(dump_entry->type);
+ if (fetch_max > (dump_entry->size - dump_offset))
+ fetch_remain = dump_entry->size - dump_offset;
+ else
+ fetch_remain = fetch_max;
+
+ /* offsets */
+ fetch_off = fetch_remain;
+ tce_off = sg_off = 0;
+
+ for (sg = dump_data; sg; sg = sg->next) {
+ num_entries = (sg->length - 16) /
+ sizeof(struct opal_sg_entry);
+ if (num_entries <= 0)
+ return OPAL_PARAMETER;
+
+ for (i = 0; i < num_entries; i++) {
+ entry = &sg->entry[i];
+
+ /* Continue until we get offset */
+ if ((sg_off + entry->length) < dump_offset) {
+ sg_off += entry->length;
+ continue;
+ }
+
+ /*
+ * SG list entry size can be more than 4k.
+ * Map only required pages, instead of
+ * mapping entire entry.
+ */
+ if (!tce_off) {
+ buf_off = (dump_offset - sg_off) & ~0xfff;
+ length = entry->length - buf_off;
+ } else {
+ buf_off = 0;
+ length = entry->length;
+ }
+
+ /* Adjust length for last mapping */
+ if (fetch_off <= length) {
+ length = fetch_off;
+ last = true;
+ }
+
+ /* Adjust offset */
+ sg_off += entry->length;
+ fetch_off -= length;
+
+ /* TCE mapping */
+ dump_tce_map(tce_off, entry->data + buf_off, length);
+ tce_off += length;
+
+ /* TCE mapping complete */
+ if (last)
+ return OPAL_SUCCESS;
+ }
+ } /* outer loop */
+ return OPAL_PARAMETER;
+}
+
+static void dump_read_complete(struct fsp_msg *msg)
+{
+ void *buffer;
+ size_t length, offset;
+ int rc;
+ uint32_t dump_id;
+ uint16_t id;
+ uint8_t flags, status;
+ bool compl = false;
+
+ status = (msg->resp->word1 >> 8) & 0xff;
+ flags = (msg->data.words[0] >> 16) & 0xff;
+ id = msg->data.words[0] & 0xffff;
+ dump_id = msg->data.words[1];
+ offset = msg->resp->data.words[1];
+ length = msg->resp->data.words[2];
+
+ fsp_freemsg(msg);
+
+ lock(&dump_lock);
+
+ if (dump_state == DUMP_STATE_ABORTING) {
+ printf("DUMP: Fetch dump aborted, ID = 0x%x\n", dump_id);
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+ update_dump_state(DUMP_STATE_NONE);
+ goto bail;
+ }
+
+ switch (status) {
+ case FSP_STATUS_SUCCESS: /* Fetch next dump block */
+ if (dump_offset < dump_entry->size) {
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+ rc = fsp_dump_read();
+ if (rc == OPAL_SUCCESS)
+ goto bail;
+ } else { /* Dump read complete */
+ compl = true;
+ }
+ break;
+ case FSP_STATUS_MORE_DATA: /* More data to read */
+ offset += length;
+ buffer = (void *)PSI_DMA_DUMP_DATA + offset;
+ fetch_remain -= length;
+
+ rc = fsp_fetch_data_queue(flags, id, dump_id, offset, buffer,
+ &fetch_remain, dump_read_complete);
+ if (rc == OPAL_SUCCESS)
+ goto bail;
+ break;
+ default:
+ break;
+ }
+
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+
+ /* Update state */
+ if (compl) {
+ printf("DUMP: Fetch dump success. ID = 0x%x\n", dump_id);
+ update_dump_state(DUMP_STATE_FETCH);
+ } else {
+ printf("DUMP: Fetch dump partial. ID = 0x%x\n", dump_id);
+ update_dump_state(DUMP_STATE_PARTIAL);
+ }
+ bail:
+ unlock(&dump_lock);
+}
+
+/*
+ * Fetch dump data from FSP
+ */
+static int64_t fsp_dump_read(void)
+{
+ int64_t rc;
+ uint16_t data_set;
+ uint8_t flags = 0x00;
+
+ /* Get data set ID */
+ data_set = get_dump_data_set_id(dump_entry->type);
+
+ /* Map TCE buffer */
+ rc = map_dump_buffer();
+ if (rc != OPAL_SUCCESS) {
+ printf("DUMP: TCE mapping failed\n");
+ return rc;
+ }
+
+ printf("DUMP: Fetch Dump. ID = %02x, sub ID = %08x, len = %ld\n",
+ data_set, dump_entry->id, fetch_remain);
+
+ /* Fetch data */
+ rc = fsp_fetch_data_queue(flags, data_set, dump_entry->id,
+ dump_offset, (void *)PSI_DMA_DUMP_DATA,
+ &fetch_remain, dump_read_complete);
+
+ /* Adjust dump fetch offset */
+ dump_offset += fetch_remain;
+
+ return rc;
+}
+
+static int64_t fsp_opal_dump_read(uint32_t dump_id,
+ struct opal_sg_list *list)
+{
+ struct dump_record *record;
+ int64_t rc, size;
+
+ lock(&dump_lock);
+
+ /* Check state */
+ if (dump_state != DUMP_STATE_NOTIFY) {
+ rc = check_dump_state();
+ goto out;
+ }
+
+ /* Validate dump ID */
+ record = get_dump_rec_from_list(dump_id);
+ if (!record) { /* List corrupted? */
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ /* Validate dump buffer and size */
+ rc = validate_dump_sglist(list, &size);
+ if (rc != OPAL_SUCCESS) {
+ printf("DUMP: SG list validation failed\n");
+ goto out;
+ }
+
+	if (size < record->size) { /* Insufficient buffer */
+ printf("DUMP: Insufficient buffer\n");
+ rc = OPAL_PARAMETER;
+ goto out;
+ }
+
+ /* Update state */
+ update_dump_state(DUMP_STATE_FETCHING);
+
+ /* Fetch dump data */
+ dump_entry = record;
+ dump_data = list;
+ dump_offset = 0;
+ rc = fsp_dump_read();
+ if (rc != OPAL_SUCCESS)
+ goto out;
+
+ /* Check status after initiating fetch data */
+ rc = check_dump_state();
+
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+static void dump_ack_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ if (status)
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+ "DUMP: ACK failed for ID: 0x%x\n",
+ msg->data.words[0]);
+ else
+ printf("DUMP: ACKed dump ID: 0x%x\n", msg->data.words[0]);
+
+ fsp_freemsg(msg);
+}
+
+/*
+ * Acknowledge dump
+ */
+static int64_t fsp_opal_dump_ack(uint32_t dump_id)
+{
+ struct dump_record *record;
+ struct fsp_msg *msg;
+ int rc;
+ uint32_t cmd;
+ uint8_t dump_type = 0;
+
+ /* Get dump type */
+ lock(&dump_lock);
+ record = get_dump_rec_from_list(dump_id);
+ if (record)
+ dump_type = record->type;
+
+ /*
+	 * The next available dump in the pending list will be of a
+	 * different type, hence we don't need to wait for the ack to
+	 * complete.
+ *
+ * Note:
+ * This allows us to proceed even if we fail to ACK.
+ * In the worst case we may get notification for the
+ * same dump again, which is probably better than
+ * looping forever.
+ */
+ rc = remove_dump_id_from_list(dump_id);
+ if (rc != OPAL_SUCCESS) /* Invalid dump id */
+ goto out;
+
+ /* Adjust mod value */
+ cmd = FSP_CMD_ACK_DUMP | (dump_type & 0xff);
+ msg = fsp_mkmsg(cmd, 1, dump_id);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+			"DUMP: Message allocation failed!\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, dump_ack_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_ACK),
+ "DUMP: Failed to queue dump ack message.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+out:
+ unlock(&dump_lock);
+ return rc;
+}
+
+/* Resend dump available notification */
+static int64_t fsp_opal_dump_resend_notification(void)
+{
+ lock(&dump_lock);
+
+ if (dump_state != DUMP_STATE_ABSENT)
+ update_dump_state(DUMP_STATE_NONE);
+
+ update_opal_dump_notify();
+
+ unlock(&dump_lock);
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Handle FSP R/R event.
+ */
+static bool fsp_dump_retrieve_rr(uint32_t cmd_sub_mod,
+ struct fsp_msg *msg __unused)
+{
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ lock(&dump_lock);
+ /* Reset dump state */
+ if (dump_state == DUMP_STATE_FETCHING)
+ update_dump_state(DUMP_STATE_ABORTING);
+ unlock(&dump_lock);
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ lock(&dump_lock);
+
+ /* Reset TCE mapping */
+ dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE);
+
+ /* Reset dump state */
+ update_dump_state(DUMP_STATE_NONE);
+
+ /*
+		 * For now we keep the R/R handler simple. In the worst
+		 * case we may end up resending the dump available
+		 * notification for the same dump ID twice to Linux.
+ */
+ update_opal_dump_notify();
+ unlock(&dump_lock);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Handle host kexec'ing scenarios
+ */
+static bool opal_kexec_dump_notify(void *data __unused)
+{
+ bool ready = true;
+
+ lock(&dump_lock);
+
+	/* Dump retrieval in progress? */
+ if (dump_state == DUMP_STATE_FETCHING)
+ dump_state = DUMP_STATE_ABORTING;
+
+ /* Not yet safe to kexec */
+ if (dump_state == DUMP_STATE_ABORTING)
+ ready = false;
+
+ unlock(&dump_lock);
+
+ return ready;
+}
+
+/*
+ * FipS dump notification
+ */
+void fsp_fips_dump_notify(uint32_t dump_id, uint32_t dump_size)
+{
+ printf("DUMP: FipS dump available. ID = 0x%x [size: %d bytes]\n",
+ dump_id, dump_size);
+ add_dump_id_to_list(DUMP_TYPE_FSP, dump_id, dump_size);
+}
+
+/*
+ * System/Platform dump notification
+ */
+static bool fsp_sys_dump_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ /*
+	 * Though the spec says mod 00 is deprecated, we still
+	 * seem to get mod 00 notifications (at least on
+	 * P7 machines).
+ */
+ if (cmd_sub_mod != FSP_RSP_SYS_DUMP &&
+ cmd_sub_mod != FSP_RSP_SYS_DUMP_OLD)
+ return false;
+
+ printf("DUMP: Platform dump available. ID = 0x%x [size: %d bytes]\n",
+ msg->data.words[0], msg->data.words[1]);
+
+ add_dump_id_to_list(DUMP_TYPE_SYS,
+ msg->data.words[0], msg->data.words[1]);
+ return true;
+}
+
+/*
+ * If a platform dump is available at IPL time, we get the
+ * notification via HDAT. Check the device tree for the
+ * dump's presence.
+ */
+static void check_ipl_sys_dump(void)
+{
+ struct dt_node *dump_node;
+ uint32_t dump_id, dump_size;
+
+ dump_node = dt_find_by_path(dt_root, "ipl-params/platform-dump");
+ if (!dump_node)
+ return;
+
+ if (!dt_find_property(dump_node, "dump-id"))
+ return;
+
+ dump_id = dt_prop_get_u32(dump_node, "dump-id");
+ dump_size = (uint32_t)dt_prop_get_u64(dump_node, "total-size");
+
+ printf("DUMP: Platform dump present during IPL.\n");
+ printf(" ID = 0x%x [size: %d bytes]\n", dump_id, dump_size);
+
+ add_dump_id_to_list(DUMP_TYPE_SYS, dump_id, dump_size);
+}
+
+/*
+ * Allocate and initialize dump list
+ */
+static int init_dump_free_list(void)
+{
+ struct dump_record *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct dump_record) * MAX_DUMP_RECORD);
+ if (!entry) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_INIT),
+ "DUMP: Out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_DUMP_RECORD; i++) {
+ list_add_tail(&dump_free, &entry->link);
+ entry++;
+ }
+ return 0;
+}
+
+static struct fsp_client fsp_sys_dump_client = {
+ .message = fsp_sys_dump_notify,
+};
+
+static struct fsp_client fsp_dump_client_rr = {
+ .message = fsp_dump_retrieve_rr,
+};
+
+void fsp_dump_init(void)
+{
+ if (!fsp_present()) {
+ update_dump_state(DUMP_STATE_ABSENT);
+ return;
+ }
+
+ /* Initialize list */
+ if (init_dump_free_list() != 0) {
+ update_dump_state(DUMP_STATE_ABSENT);
+ return;
+ }
+
+ /* Register for Class CE */
+ fsp_register_client(&fsp_sys_dump_client, FSP_MCLASS_SERVICE);
+ /* Register for Class AA (FSP R/R) */
+ fsp_register_client(&fsp_dump_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Register for sync on host reboot call */
+ opal_add_host_sync_notifier(opal_kexec_dump_notify, NULL);
+
+ /* OPAL interface */
+ opal_register(OPAL_DUMP_INIT, fsp_opal_dump_init, 1);
+ opal_register(OPAL_DUMP_INFO, fsp_opal_dump_info, 2);
+ opal_register(OPAL_DUMP_INFO2, fsp_opal_dump_info2, 3);
+ opal_register(OPAL_DUMP_READ, fsp_opal_dump_read, 2);
+ opal_register(OPAL_DUMP_ACK, fsp_opal_dump_ack, 1);
+ opal_register(OPAL_DUMP_RESEND, fsp_opal_dump_resend_notification, 0);
+
+ /* Check for platform dump presence during IPL time */
+ check_ipl_sys_dump();
+}
diff --git a/hw/fsp/fsp-elog-read.c b/hw/fsp/fsp-elog-read.c
new file mode 100644
index 0000000..f4a689f
--- /dev/null
+++ b/hw/fsp/fsp-elog-read.c
@@ -0,0 +1,520 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This code enables retrieving error logs from the FSP into
+ * sapphire, in sequence.
+ * The FSP sends the next log only when sapphire sends a new
+ * log notification response to the FSP. On completion of reading
+ * a log from the FSP, OPAL_EVENT_ERROR_LOG_AVAIL is signaled.
+ * This remains raised until a call to opal_elog_read()
+ * is made and OPAL_SUCCESS is returned, upon which
+ * the operation is complete and the event is cleared.
+ * This is the READ action from the FSP.
+ */
+
+/*
+ * Design of READ error log:
+ * When we receive a new error log entry notification from the FSP,
+ * we queue it onto the "pending" list.
+ * If the "pending" list is not empty, we start fetching the log
+ * from the FSP.
+ *
+ * When Linux reads a log entry, we dequeue it from the "pending" list
+ * and enqueue it onto another "processed" list. At this point, if the
+ * "pending" list is not empty, we continue to fetch the next log.
+ *
+ * When Linux calls opal_resend_pending_logs(), we fetch the log
+ * corresponding to the head of the pending list and move it to the
+ * processed list, and continue this process until the pending list is
+ * empty. If the pending list was empty earlier and is currently
+ * non-empty, we initiate an error log fetch.
+ *
+ * When Linux acks an error log, we remove it from the processed list.
+ */
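+
+/*
+ * Roughly, an entry moves through the lists like this (illustrative):
+ *
+ *   FSP notification -> elog_read_pending -> elog_read_processed -> elog_read_free
+ *                       (fetch, Linux read)   (Linux ack)
+ */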
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <cpu.h>
+#include <lock.h>
+#include <errno.h>
+#include <psi.h>
+#include <fsp-elog.h>
+
+/*
+ * Maximum number of entries that are pre-allocated
+ * to keep track of pending elogs to be fetched.
+ */
+#define ELOG_READ_MAX_RECORD 128
+
+/* The following variables indicate the state of the head log
+ * entry being fetched from the FSP; they are not
+ * overwritten until the next log is retrieved from the FSP.
+ */
+enum elog_head_state {
+	ELOG_STATE_FETCHING, /* In the process of reading the log from FSP */
+	ELOG_STATE_FETCHED, /* Reading the log from FSP completed */
+	ELOG_STATE_NONE, /* Indicates to fetch the next log */
+	ELOG_STATE_REJECTED, /* Resend all pending logs to Linux */
+};
+
+/* Structure to maintain log-id, log-size, and the pending and processed lists */
+struct fsp_log_entry {
+ uint32_t log_id;
+ size_t log_size;
+ struct list_node link;
+};
+
+static LIST_HEAD(elog_read_pending);
+static LIST_HEAD(elog_read_processed);
+static LIST_HEAD(elog_read_free);
+
+/*
+ * This lock protects the processed and pending lists against
+ * concurrent modification, and is also held while updating the
+ * state of each log.
+ */
+static struct lock elog_read_lock = LOCK_UNLOCKED;
+
+/* log buffer to copy FSP log for READ */
+#define ELOG_READ_BUFFER_SIZE 0x00040000
+static void *elog_read_buffer = NULL;
+static uint32_t elog_head_id; /* FSP entry ID */
+static size_t elog_head_size; /* actual FSP log size */
+static uint32_t elog_read_retries; /* bad response status count */
+
+/* Initialize the state of the log */
+static enum elog_head_state elog_head_state = ELOG_STATE_NONE;
+
+/* Forward declaration needed due to a circular dependency */
+static void fsp_elog_queue_fetch(void);
+
+/*
+ * Check the response message for the mbox acknowledgment
+ * command sent to the FSP.
+ */
+static void fsp_elog_ack_complete(struct fsp_msg *msg)
+{
+ uint8_t val;
+
+ if (!msg->resp)
+ return;
+ val = (msg->resp->word1 >> 8) & 0xff;
+ if (val != 0)
+ prerror("ELOG: Acknowledgment error\n");
+ fsp_freemsg(msg);
+}
+
+/* Send an Error Log PHYP Acknowledgment to the FSP with the entry ID */
+static int64_t fsp_send_elog_ack(uint32_t log_id)
+{
+
+ struct fsp_msg *ack_msg;
+
+ ack_msg = fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK, 1, log_id);
+ if (!ack_msg) {
+ prerror("ELOG: Failed to allocate ack message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(ack_msg, fsp_elog_ack_complete)) {
+ fsp_freemsg(ack_msg);
+ ack_msg = NULL;
+ prerror("ELOG: Error queueing elog ack complete\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+/* Retrieve the error log from the FSP, using a TCE for the data transfer */
+static void fsp_elog_check_and_fetch_head(void)
+{
+ lock(&elog_read_lock);
+
+ if (elog_head_state != ELOG_STATE_NONE ||
+ list_empty(&elog_read_pending)) {
+ unlock(&elog_read_lock);
+ return;
+ }
+
+ elog_read_retries = 0;
+
+ /* Start fetching first entry from the pending list */
+ fsp_elog_queue_fetch();
+ unlock(&elog_read_lock);
+}
+
+/* This function must be called with the lock held */
+static void fsp_elog_set_head_state(enum elog_head_state state)
+{
+ enum elog_head_state old_state = elog_head_state;
+
+ elog_head_state = state;
+
+ if (state == ELOG_STATE_FETCHED && old_state != ELOG_STATE_FETCHED)
+ opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL,
+ OPAL_EVENT_ERROR_LOG_AVAIL);
+ if (state != ELOG_STATE_FETCHED && old_state == ELOG_STATE_FETCHED)
+ opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, 0);
+}
+
+/*
+ * When we have retried fetching a log from the FSP the maximum
+ * number of times, we call the following function to delete the
+ * log from the pending list and update the state to fetch the
+ * next log.
+ *
+ * This function must be called with the lock held.
+ */
+static void fsp_elog_fetch_failure(uint8_t fsp_status)
+{
+ struct fsp_log_entry *log_data;
+
+ /* Remove the head of the pending list and put it on the free list */
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+ list_del(&log_data->link);
+ list_add(&elog_read_free, &log_data->link);
+ prerror("ELOG: received invalid data: %x FSP status: 0x%x\n",
+ log_data->log_id, fsp_status);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+}
+
+/* Read the response status from the FSP for the fetch SP data mbox command */
+static void fsp_elog_read_complete(struct fsp_msg *read_msg)
+{
+ uint8_t val;
+
+ lock(&elog_read_lock);
+ val = (read_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(read_msg);
+
+ switch (val) {
+ case FSP_STATUS_SUCCESS:
+ fsp_elog_set_head_state(ELOG_STATE_FETCHED);
+ break;
+
+ case FSP_STATUS_DMA_ERROR:
+ if (elog_read_retries++ < MAX_RETRIES) {
+ /*
+ * On an error response from the FSP we retry the
+ * fetch SP data mbox command up to MAX_RETRIES
+ * times; if the response is still not valid, we
+ * drop this log and move on to the next one.
+ */
+ fsp_elog_queue_fetch();
+ break;
+ }
+ fsp_elog_fetch_failure(val);
+ break;
+
+ default:
+ fsp_elog_fetch_failure(val);
+ }
+ if (elog_head_state == ELOG_STATE_REJECTED)
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ unlock(&elog_read_lock);
+
+ /* Check if a new log needs fetching */
+ fsp_elog_check_and_fetch_head();
+}
+
+/* Read the error log from the FSP through mbox commands */
+static void fsp_elog_queue_fetch(void)
+{
+ int rc;
+ uint8_t flags = 0;
+ struct fsp_log_entry *entry;
+
+ entry = list_top(&elog_read_pending, struct fsp_log_entry, link);
+ fsp_elog_set_head_state(ELOG_STATE_FETCHING);
+ elog_head_id = entry->log_id;
+ elog_head_size = entry->log_size;
+
+ rc = fsp_fetch_data_queue(flags, FSP_DATASET_ERRLOG, elog_head_id,
+ 0, (void *)PSI_DMA_ERRLOG_READ_BUF,
+ &elog_head_size, fsp_elog_read_complete);
+ if (rc) {
+ prerror("ELOG: failed to queue read message: %d\n", rc);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ }
+}
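+
+/*
+ * A sketch of the expected consumer flow, based on the handlers below:
+ * Linux first calls OPAL_ELOG_SIZE to obtain the ID and size of the
+ * head log, then OPAL_ELOG_READ to copy it out, and finally
+ * OPAL_ELOG_ACK so the log can be discarded. In (hypothetical)
+ * kernel-side pseudo-code:
+ *
+ *	opal_elog_info(&id, &size, &type);
+ *	opal_elog_read(buf, size, id);
+ *	opal_elog_ack(id);
+ */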
+
+/* OPAL interface for powernv to read the log size and log ID from Sapphire */
+static int64_t fsp_opal_elog_info(uint64_t *opal_elog_id,
+ uint64_t *opal_elog_size, uint64_t *elog_type)
+{
+ struct fsp_log_entry *log_data;
+
+ /* copy type of the error log */
+ *elog_type = ELOG_TYPE_PEL;
+
+ lock(&elog_read_lock);
+ if (elog_head_state != ELOG_STATE_FETCHED) {
+ unlock(&elog_read_lock);
+ return OPAL_WRONG_STATE;
+ }
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+ *opal_elog_id = log_data->log_id;
+ *opal_elog_size = log_data->log_size;
+ unlock(&elog_read_lock);
+ return OPAL_SUCCESS;
+}
+
+/* OPAL interface for powernv to read the log from Sapphire */
+static int64_t fsp_opal_elog_read(uint64_t *buffer, uint64_t opal_elog_size,
+ uint64_t opal_elog_id)
+{
+ struct fsp_log_entry *log_data;
+
+ /*
+ * Read the top entry from the list; the top record of the list
+ * is always the one that has been fetched from the FSP.
+ */
+ lock(&elog_read_lock);
+ if (elog_head_state != ELOG_STATE_FETCHED) {
+ unlock(&elog_read_lock);
+ return OPAL_WRONG_STATE;
+ }
+
+ log_data = list_top(&elog_read_pending, struct fsp_log_entry, link);
+
+ /* Check that the log ID and log size match before reading the log */
+ if ((opal_elog_id != log_data->log_id) ||
+ (opal_elog_size != log_data->log_size)) {
+ unlock(&elog_read_lock);
+ return OPAL_PARAMETER;
+ }
+
+ memcpy((void *)buffer, elog_read_buffer, opal_elog_size);
+
+ /*
+ * Once the log has been read by Linux, move the record from the
+ * pending list to the processed list and reset the head state so
+ * that the next record can be fetched.
+ */
+ list_del(&log_data->link);
+ list_add(&elog_read_processed, &log_data->link);
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+ unlock(&elog_read_lock);
+
+ /* Fetch the next error log from the FSP, if any */
+ fsp_elog_check_and_fetch_head();
+
+ return OPAL_SUCCESS;
+}
+
+/* Reject the current head log so that it is re-fetched (or skipped) */
+static void elog_reject_head(void)
+{
+ if (elog_head_state == ELOG_STATE_FETCHING)
+ fsp_elog_set_head_state(ELOG_STATE_REJECTED);
+ if (elog_head_state == ELOG_STATE_FETCHED)
+ fsp_elog_set_head_state(ELOG_STATE_NONE);
+}
+
+/* OPAL interface for powernv to send an ack to the FSP with the log ID */
+static int64_t fsp_opal_elog_ack(uint64_t ack_id)
+{
+ int rc = 0;
+ struct fsp_log_entry *record, *next_record;
+
+ /* Send acknowledgement to FSP */
+ rc = fsp_send_elog_ack(ack_id);
+ if (rc != OPAL_SUCCESS) {
+ prerror("ELOG: failed to send acknowledgement: %d\n", rc);
+ return rc;
+ }
+ lock(&elog_read_lock);
+ if (ack_id == elog_head_id)
+ elog_reject_head();
+ list_for_each_safe(&elog_read_pending, record, next_record, link) {
+ if (record->log_id != ack_id)
+ continue;
+ list_del(&record->link);
+ list_add(&elog_read_free, &record->link);
+ }
+ list_for_each_safe(&elog_read_processed, record, next_record, link) {
+ if (record->log_id != ack_id)
+ continue;
+ list_del(&record->link);
+ list_add(&elog_read_free, &record->link);
+ }
+ unlock(&elog_read_lock);
+
+ return rc;
+}
+
+/*
+ * When Linux kexecs, it asks us to resend all logs which it has
+ * not yet acknowledged.
+ */
+static void fsp_opal_resend_pending_logs(void)
+{
+ struct fsp_log_entry *entry;
+
+ lock(&elog_read_lock);
+
+ /*
+ * Move all records from the processed list back to the head of
+ * the pending list, so that they will be handed to Linux again.
+ */
+ while (!list_empty(&elog_read_processed)) {
+ entry = list_pop(&elog_read_processed,
+ struct fsp_log_entry, link);
+ list_add(&elog_read_pending, &entry->link);
+ }
+
+ /*
+ * If the current fetched or fetching log doesn't match our
+ * new pending list head, then reject it
+ */
+ if (!list_empty(&elog_read_pending)) {
+ entry = list_top(&elog_read_pending,
+ struct fsp_log_entry, link);
+ if (entry->log_id != elog_head_id)
+ elog_reject_head();
+ }
+
+ unlock(&elog_read_lock);
+
+ /* Read error log from FSP if needed */
+ fsp_elog_check_and_fetch_head();
+}
+
+/* fsp elog notify function */
+static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg)
+{
+ int rc = 0;
+ struct fsp_log_entry *record;
+ uint32_t log_id;
+ uint32_t log_size;
+
+ if (cmd_sub_mod != FSP_CMD_ERRLOG_NOTIFICATION)
+ return false;
+
+ log_id = msg->data.words[0];
+ log_size = msg->data.words[1];
+
+ printf("ELOG: Notified of log 0x%08x (size: %d)\n",
+ log_id, log_size);
+
+ /* Hold the lock until we have taken a node from elog_read_free */
+ lock(&elog_read_lock);
+ if (!list_empty(&elog_read_free)) {
+ /* Create a new entry in the pending list */
+ record = list_pop(&elog_read_free, struct fsp_log_entry, link);
+ record->log_id = log_id;
+ record->log_size = log_size;
+ list_add_tail(&elog_read_pending, &record->link);
+ unlock(&elog_read_lock);
+
+ /* Send response back to FSP for a new elog notify message */
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog notification"
+ " response: %d\n", rc);
+
+ /* read error log from FSP */
+ fsp_elog_check_and_fetch_head();
+
+ } else {
+ printf("ELOG: Log entry 0x%08x discarded\n", log_id);
+
+ /* No free records left: drop the lock before responding */
+ unlock(&elog_read_lock);
+
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog notification"
+ " response: %d\n", rc);
+ /*
+ * The list is full with the maximum number of records, so
+ * we send a "discarded by PHYP" (condition full) ack to
+ * the FSP.
+ *
+ * At some point in the future, we'll get notified again.
+ * This is largely up to the FSP as to when it tells us
+ * about the log again.
+ */
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK | 0x02,
+ 1, log_id), fsp_freemsg);
+ if (rc)
+ prerror("ELOG: Failed to queue errlog ack"
+ " response: %d\n", rc);
+ }
+
+ return true;
+}
+
+static struct fsp_client fsp_get_elog_notify = {
+ .message = fsp_elog_msg,
+};
+
+/* Pre-allocate memory for reading error log from FSP */
+static int init_elog_read_free_list(uint32_t num_entries)
+{
+ struct fsp_log_entry *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct fsp_log_entry) * num_entries);
+ if (!entry)
+ goto out_err;
+
+ for (i = 0; i < num_entries; ++i) {
+ list_add_tail(&elog_read_free, &entry->link);
+ entry++;
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/* FSP elog read init function */
+void fsp_elog_read_init(void)
+{
+ int val = 0;
+
+ if (!fsp_present())
+ return;
+
+ elog_read_buffer = memalign(TCE_PSIZE, ELOG_READ_BUFFER_SIZE);
+ if (!elog_read_buffer) {
+ prerror("FSP: could not allocate FSP ELOG_READ_BUFFER!\n");
+ return;
+ }
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_ERRLOG_READ_BUF, elog_read_buffer,
+ PSI_DMA_ERRLOG_READ_BUF_SZ);
+
+ /* Pre-allocate memory for ELOG_READ_MAX_RECORD (128) records */
+ val = init_elog_read_free_list(ELOG_READ_MAX_RECORD);
+ if (val != 0)
+ return;
+
+ /* Register for Error Log class D2 notifications */
+ fsp_register_client(&fsp_get_elog_notify, FSP_MCLASS_ERR_LOG);
+
+ /* Register the OPAL interfaces */
+ opal_register(OPAL_ELOG_READ, fsp_opal_elog_read, 3);
+ opal_register(OPAL_ELOG_ACK, fsp_opal_elog_ack, 1);
+ opal_register(OPAL_ELOG_RESEND, fsp_opal_resend_pending_logs, 0);
+ opal_register(OPAL_ELOG_SIZE, fsp_opal_elog_info, 3);
+}
diff --git a/hw/fsp/fsp-elog-write.c b/hw/fsp/fsp-elog-write.c
new file mode 100644
index 0000000..ee79c4d
--- /dev/null
+++ b/hw/fsp/fsp-elog-write.c
@@ -0,0 +1,643 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This code enables the generation and pushing of error logs from
+ * powernv and Sapphire to the FSP. Critical events from Sapphire
+ * that need to be reported are pushed to the FSP after converting
+ * the error log to Platform Error Log (PEL) format.
+ * This is termed the WRITE action to the FSP.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <cpu.h>
+#include <lock.h>
+#include <errno.h>
+#include <fsp-elog.h>
+
+/*
+ * Maximum number of buffers that are pre-allocated
+ * to hold elogs that are reported on Sapphire and
+ * powernv.
+ */
+#define ELOG_WRITE_MAX_RECORD 64
+
+static LIST_HEAD(elog_write_pending);
+static LIST_HEAD(elog_write_free);
+
+static struct lock elog_write_lock = LOCK_UNLOCKED;
+static struct lock elog_panic_write_lock = LOCK_UNLOCKED;
+
+/* Platform Log ID as per the spec */
+static uint32_t sapphire_elog_id = 0xB0000000;
+static uint32_t powernv_elog_id = 0xB1000000;
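+
+/*
+ * For example: the first Sapphire-originated log is assigned PLID
+ * 0xB0000001, the next one 0xB0000002, and so on, since the counters
+ * are pre-incremented in create_private_header_section() below.
+ */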
+
+/* Log buffer used to stage the PEL data for WRITE to the FSP */
+#define ELOG_WRITE_BUFFER_SIZE 0x00050000
+static void *elog_write_buffer = NULL;
+
+#define ELOG_PANIC_WRITE_BUFFER_SIZE 0x0010000
+static void *elog_panic_write_buffer = NULL;
+
+struct opal_errorlog *panic_write_buffer;
+static int panic_write_buffer_valid;
+static uint32_t elog_write_retries;
+
+/* Forward declarations needed because of a circular dependency */
+static int create_opal_event(struct opal_errorlog *elog_data, char *pel_buffer);
+static int opal_send_elog_to_fsp(void);
+
+void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
+ const char *fmt, ...)
+{
+ struct opal_errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+ /* Log the error onto the Sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ /* Append any number of call out dumps */
+ if (e_info->call_out)
+ e_info->call_out(buf, data, size);
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
+
+
+void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
+{
+ struct opal_errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+ /* Log the error onto the Sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
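+
+/*
+ * Typical usage, as seen elsewhere in this series (e.g. hw/fsp/fsp-leds.c):
+ * an error type is declared once with DEFINE_LOG_ENTRY() and then logged
+ * through its e_info() descriptor:
+ *
+ *	DEFINE_LOG_ENTRY(OPAL_RC_LED_SPCN, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ *			 OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ *			 OPAL_NA, NULL);
+ *
+ *	log_simple_error(&e_info(OPAL_RC_LED_SPCN),
+ *			 "LED: Last SPCN command failed, status=%02x\n",
+ *			 status);
+ */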
+
+static struct opal_errorlog *get_write_buffer(int opal_event_severity)
+{
+ struct opal_errorlog *buf;
+
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_free)) {
+ unlock(&elog_write_lock);
+ if (opal_event_severity == OPAL_ERROR_PANIC) {
+ lock(&elog_panic_write_lock);
+ if (panic_write_buffer_valid == 0) {
+ buf = (struct opal_errorlog *)
+ panic_write_buffer;
+ panic_write_buffer_valid = 1; /* In Use */
+ unlock(&elog_panic_write_lock);
+ } else {
+ unlock(&elog_panic_write_lock);
+ prerror("ELOG: Write buffer full. Retry later\n");
+ return NULL;
+ }
+ } else {
+ prerror("ELOG: Write buffer list is full. Retry later\n");
+ return NULL;
+ }
+ } else {
+ buf = list_pop(&elog_write_free, struct opal_errorlog, link);
+ unlock(&elog_write_lock);
+ }
+
+ memset(buf, 0, sizeof(struct opal_errorlog));
+ return buf;
+}
+
+/* Reporting of error via struct opal_errorlog */
+struct opal_errorlog *opal_elog_create(struct opal_err_info *e_info)
+{
+ struct opal_errorlog *buf;
+
+ buf = get_write_buffer(e_info->sev);
+ if (buf) {
+ buf->error_event_type = e_info->err_type;
+ buf->component_id = e_info->cmp_id;
+ buf->subsystem_id = e_info->subsystem;
+ buf->event_severity = e_info->sev;
+ buf->event_subtype = e_info->event_subtype;
+ buf->reason_code = e_info->reason_code;
+ buf->elog_origin = ORG_SAPPHIRE;
+ }
+
+ return buf;
+}
+
+static void remove_elog_head_entry(void)
+{
+ struct opal_errorlog *entry;
+
+ lock(&elog_write_lock);
+ entry = list_pop(&elog_write_pending, struct opal_errorlog, link);
+ list_add_tail(&elog_write_free, &entry->link);
+ elog_write_retries = 0;
+ unlock(&elog_write_lock);
+}
+
+static void opal_fsp_write_complete(struct fsp_msg *read_msg)
+{
+ uint8_t val;
+
+ val = (read_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(read_msg);
+
+ switch (val) {
+ case FSP_STATUS_SUCCESS:
+ remove_elog_head_entry();
+ break;
+
+ default:
+ if (elog_write_retries++ >= MAX_RETRIES) {
+ remove_elog_head_entry();
+ prerror("ELOG: Error in writing to FSP!\n");
+ }
+ break;
+ }
+
+ if (opal_send_elog_to_fsp() != OPAL_SUCCESS)
+ prerror("ELOG: Error sending elog to FSP !\n");
+}
+
+/* write PEL format hex dump of the log to FSP */
+static int64_t fsp_opal_elog_write(size_t opal_elog_size)
+{
+ struct fsp_msg *elog_msg;
+
+ elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size,
+ 0, PSI_DMA_ERRLOG_WRITE_BUF);
+ if (!elog_msg) {
+ prerror("ELOG: Failed to create message for WRITE to FSP\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(elog_msg, opal_fsp_write_complete)) {
+ fsp_freemsg(elog_msg);
+ elog_msg = NULL;
+ prerror("FSP: Error queueing elog update\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+static int opal_send_elog_to_fsp(void)
+{
+ struct opal_errorlog *head;
+ int rc = OPAL_SUCCESS;
+ int pel_offset = 0;
+
+ /*
+ * Convert the entry to PEL format and push it down to the FSP.
+ * We then wait for the ack from the FSP.
+ */
+ lock(&elog_write_lock);
+ if (!list_empty(&elog_write_pending)) {
+ head = list_top(&elog_write_pending,
+ struct opal_errorlog, link);
+ pel_offset = create_opal_event(head, (char *)elog_write_buffer);
+ rc = fsp_opal_elog_write(pel_offset);
+ unlock(&elog_write_lock);
+ return rc;
+ }
+ unlock(&elog_write_lock);
+ return rc;
+}
+
+static int opal_push_logs_sync_to_fsp(struct opal_errorlog *buf)
+{
+ struct fsp_msg *elog_msg;
+ int opal_elog_size = 0;
+ int rc = OPAL_SUCCESS;
+
+ lock(&elog_panic_write_lock);
+ opal_elog_size = create_opal_event(buf,
+ (char *)elog_panic_write_buffer);
+
+ elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size,
+ 0, PSI_DMA_ELOG_PANIC_WRITE_BUF);
+ if (!elog_msg) {
+ prerror("ELOG: Failed to create message for WRITE to FSP\n");
+ unlock(&elog_panic_write_lock);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ if (fsp_sync_msg(elog_msg, false)) {
+ fsp_freemsg(elog_msg);
+ rc = OPAL_INTERNAL_ERROR;
+ } else {
+ rc = (elog_msg->resp->word1 >> 8) & 0xff;
+ fsp_freemsg(elog_msg);
+ }
+
+ if ((buf == panic_write_buffer) && (panic_write_buffer_valid == 1)) {
+ panic_write_buffer_valid = 0;
+ unlock(&elog_panic_write_lock);
+ } else {
+ /* The buffer came from the elog_write free list; put it back */
+ unlock(&elog_panic_write_lock);
+ lock(&elog_write_lock);
+ list_add_tail(&elog_write_free, &buf->link);
+ unlock(&elog_write_lock);
+ }
+ return rc;
+}
+
+int elog_fsp_commit(struct opal_errorlog *buf)
+{
+ int rc = OPAL_SUCCESS;
+
+ if (buf->event_severity == OPAL_ERROR_PANIC) {
+ rc = opal_push_logs_sync_to_fsp(buf);
+ return rc;
+ }
+
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_pending)) {
+ list_add_tail(&elog_write_pending, &buf->link);
+ unlock(&elog_write_lock);
+ rc = opal_send_elog_to_fsp();
+ return rc;
+ }
+ list_add_tail(&elog_write_pending, &buf->link);
+ unlock(&elog_write_lock);
+ return rc;
+}
+
+/*
+ * This function is called from POWERNV to push logs
+ * to the FSP.
+ */
+static int opal_commit_log_to_fsp(struct opal_errorlog *buf)
+{
+ struct opal_errorlog *opal_buf;
+ int rc = OPAL_SUCCESS;
+
+ /*
+ * Copy the buffer to Sapphire, queue it to be pushed to the
+ * FSP, and return.
+ */
+ lock(&elog_write_lock);
+ if (list_empty(&elog_write_free)) {
+ unlock(&elog_write_lock);
+ prerror("ELOG: Error! Write buffer list is full. Retry later\n");
+ return -1;
+ }
+ opal_buf = list_pop(&elog_write_free, struct opal_errorlog, link);
+ unlock(&elog_write_lock);
+ memcpy(opal_buf, buf, sizeof(struct opal_errorlog));
+ opal_buf->elog_origin = ORG_POWERNV;
+ rc = elog_fsp_commit(opal_buf);
+ return rc;
+}
+
+int opal_elog_update_user_dump(struct opal_errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size)
+{
+ char *buffer;
+ struct opal_user_data_section *tmp;
+
+ if (!buf) {
+ prerror("ELOG: Cannot update user data. Buffer is invalid\n");
+ return -1;
+ }
+
+ buffer = (char *)buf->user_data_dump + buf->user_section_size;
+ if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) {
+ prerror("ELOG: Size of dump data overruns buffer\n");
+ return -1;
+ }
+
+ tmp = (struct opal_user_data_section *)buffer;
+ tmp->tag = tag;
+ tmp->size = size + sizeof(struct opal_user_data_section) - 1;
+ memcpy(tmp->data_dump, data, size);
+
+ buf->user_section_size += tmp->size;
+ buf->user_section_count++;
+ return 0;
+}
+
+/* Create the MTMS section for the Sapphire log */
+static void create_mtms_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_mtms_section *mtms = (struct opal_mtms_section *)
+ (pel_buffer + *pel_offset);
+
+ mtms->v6header.id = ELOG_SID_MACHINE_TYPE;
+ mtms->v6header.length = MTMS_SECTION_SIZE;
+ mtms->v6header.version = OPAL_EXT_HRD_VER;
+ mtms->v6header.subtype = 0;
+ mtms->v6header.component_id = elog_data->component_id;
+
+ memset(mtms->model, 0x00, sizeof(mtms->model));
+ memcpy(mtms->model, dt_prop_get(dt_root, "model"), OPAL_SYS_MODEL_LEN);
+ memset(mtms->serial_no, 0x00, sizeof(mtms->serial_no));
+
+ memcpy(mtms->serial_no, dt_prop_get(dt_root, "system-id"),
+ OPAL_SYS_SERIAL_LEN);
+ *pel_offset += MTMS_SECTION_SIZE;
+}
+
+/* Create extended header section */
+static void create_extended_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ const char *opalmodel = NULL;
+ uint64_t extd_time;
+
+ struct opal_extended_header_section *extdhdr =
+ (struct opal_extended_header_section *)
+ (pel_buffer + *pel_offset);
+
+ extdhdr->v6header.id = ELOG_SID_EXTENDED_HEADER;
+ extdhdr->v6header.length = EXTENDED_HEADER_SECTION_SIZE;
+ extdhdr->v6header.version = OPAL_EXT_HRD_VER;
+ extdhdr->v6header.subtype = 0;
+ extdhdr->v6header.component_id = elog_data->component_id;
+
+ memset(extdhdr->model, 0x00, sizeof(extdhdr->model));
+ opalmodel = dt_prop_get(dt_root, "model");
+ memcpy(extdhdr->model, opalmodel, OPAL_SYS_MODEL_LEN);
+
+ memset(extdhdr->serial_no, 0x00, sizeof(extdhdr->serial_no));
+ memcpy(extdhdr->serial_no, dt_prop_get(dt_root, "system-id"),
+ OPAL_SYS_SERIAL_LEN);
+
+ memset(extdhdr->opal_release_version, 0x00,
+ sizeof(extdhdr->opal_release_version));
+ memset(extdhdr->opal_subsys_version, 0x00,
+ sizeof(extdhdr->opal_subsys_version));
+
+ fsp_rtc_get_cached_tod(&extdhdr->extended_header_date, &extd_time);
+ extdhdr->extended_header_time = extd_time >> 32;
+ extdhdr->opal_symid_len = 0;
+ memset(extdhdr->opalsymid, 0x00, sizeof(extdhdr->opalsymid));
+
+ *pel_offset += EXTENDED_HEADER_SECTION_SIZE;
+}
+
+/* Set the SRC type */
+static void settype(struct opal_src_section *src, uint8_t src_type)
+{
+ char type[4];
+ sprintf(type, "%02X", src_type);
+ memcpy(src->srcstring, type, 2);
+}
+
+/* Set the SRC subsystem type */
+static void setsubsys(struct opal_src_section *src, uint8_t src_subsys)
+{
+ char subsys[4];
+ sprintf(subsys, "%02X", src_subsys);
+ memcpy(src->srcstring+2, subsys, 2);
+}
+
+/* Set the SRC reason code */
+static void setrefcode(struct opal_src_section *src, uint16_t src_refcode)
+{
+ char refcode[8];
+ sprintf(refcode, "%04X", src_refcode);
+ memcpy(src->srcstring+4, refcode, 4);
+}
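+
+/*
+ * Taken together, the three helpers above compose the ASCII SRC string:
+ * for example, type 0xBB, subsystem 0x82 and reason code 0x1234 yield
+ * the 8-character string "BB821234".
+ */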
+
+/* Create SRC section of OPAL log */
+static void create_src_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_src_section *src = (struct opal_src_section *)
+ (pel_buffer + *pel_offset);
+
+ src->v6header.id = ELOG_SID_PRIMARY_SRC;
+ src->v6header.length = SRC_SECTION_SIZE;
+ src->v6header.version = OPAL_ELOG_VERSION;
+ src->v6header.subtype = OPAL_ELOG_SST;
+ src->v6header.component_id = elog_data->component_id;
+
+ src->version = OPAL_SRC_SEC_VER;
+ src->flags = 0;
+ src->wordcount = OPAL_SRC_MAX_WORD_COUNT;
+ src->srclength = SRC_LENGTH;
+ settype(src, OPAL_SRC_TYPE_ERROR);
+ setsubsys(src, OPAL_FAILING_SUBSYSTEM);
+ setrefcode(src, elog_data->reason_code);
+ memset(src->hexwords, 0, (8 * 4));
+ src->hexwords[0] = OPAL_SRC_FORMAT;
+ src->hexwords[4] = elog_data->additional_info[0];
+ src->hexwords[5] = elog_data->additional_info[1];
+ src->hexwords[6] = elog_data->additional_info[2];
+ src->hexwords[7] = elog_data->additional_info[3];
+ *pel_offset += SRC_SECTION_SIZE;
+}
+
+/* Create user header section */
+static void create_user_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ struct opal_user_header_section *usrhdr =
+ (struct opal_user_header_section *)
+ (pel_buffer + *pel_offset);
+
+ usrhdr->v6header.id = ELOG_SID_USER_HEADER;
+ usrhdr->v6header.length = USER_HEADER_SECTION_SIZE;
+ usrhdr->v6header.version = OPAL_ELOG_VERSION;
+ usrhdr->v6header.subtype = OPAL_ELOG_SST;
+ usrhdr->v6header.component_id = elog_data->component_id;
+
+ usrhdr->subsystem_id = elog_data->subsystem_id;
+ usrhdr->event_scope = 0;
+ usrhdr->event_severity = elog_data->event_severity;
+ usrhdr->event_type = elog_data->event_subtype;
+
+ if (elog_data->elog_origin == ORG_SAPPHIRE)
+ usrhdr->action_flags = ERRL_ACTION_REPORT;
+ else
+ usrhdr->action_flags = ERRL_ACTION_NONE;
+
+ *pel_offset += USER_HEADER_SECTION_SIZE;
+}
+
+/* Create private header section */
+static void create_private_header_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ uint64_t ctime;
+ struct opal_private_header_section *privhdr =
+ (struct opal_private_header_section *)
+ pel_buffer;
+
+ privhdr->v6header.id = ELOG_SID_PRIVATE_HEADER;
+ privhdr->v6header.length = PRIVATE_HEADER_SECTION_SIZE;
+ privhdr->v6header.version = OPAL_ELOG_VERSION;
+ privhdr->v6header.subtype = OPAL_ELOG_SST;
+ privhdr->v6header.component_id = elog_data->component_id;
+
+ fsp_rtc_get_cached_tod(&privhdr->create_date, &ctime);
+ privhdr->create_time = ctime >> 32;
+ privhdr->section_count = 5;
+
+ privhdr->creator_subid_hi = 0x00;
+ privhdr->creator_subid_lo = 0x00;
+
+ if (elog_data->elog_origin == ORG_SAPPHIRE) {
+ privhdr->plid = ++sapphire_elog_id;
+ privhdr->creator_id = OPAL_CID_SAPPHIRE;
+ } else {
+ privhdr->plid = ++powernv_elog_id;
+ privhdr->creator_id = OPAL_CID_POWERNV;
+ }
+ privhdr->log_entry_id = 0x00; /* entry id is updated by FSP */
+
+ *pel_offset += PRIVATE_HEADER_SECTION_SIZE;
+}
+
+static void create_user_defined_section(struct opal_errorlog *elog_data,
+ char *pel_buffer, int *pel_offset)
+{
+ char *dump = (char *)pel_buffer + *pel_offset;
+ char *opal_buf = (char *)elog_data->user_data_dump;
+ struct opal_user_section *usrhdr;
+ struct opal_user_data_section *opal_usr_data;
+ struct opal_private_header_section *privhdr =
+ (struct opal_private_header_section *)pel_buffer;
+ int i;
+
+ for (i = 0; i < elog_data->user_section_count; i++) {
+
+ usrhdr = (struct opal_user_section *)dump;
+ opal_usr_data = (struct opal_user_data_section *)opal_buf;
+
+ usrhdr->v6header.id = ELOG_SID_USER_DEFINED;
+ usrhdr->v6header.version = OPAL_ELOG_VERSION;
+ usrhdr->v6header.length = sizeof(struct opal_v6_header) +
+ opal_usr_data->size;
+ usrhdr->v6header.subtype = OPAL_ELOG_SST;
+ usrhdr->v6header.component_id = elog_data->component_id;
+
+ memcpy(usrhdr->dump, opal_buf, opal_usr_data->size);
+ *pel_offset += usrhdr->v6header.length;
+ dump += usrhdr->v6header.length;
+ opal_buf += opal_usr_data->size;
+ privhdr->section_count++;
+ }
+}
+
+/* Create all required sections of the PEL log and write them to the TCE buffer */
+static int create_opal_event(struct opal_errorlog *elog_data, char *pel_buffer)
+{
+ int pel_offset = 0;
+
+ memset(pel_buffer, 0, PSI_DMA_ERRLOG_WRITE_BUF_SZ);
+
+ create_private_header_section(elog_data, pel_buffer, &pel_offset);
+ create_user_header_section(elog_data, pel_buffer, &pel_offset);
+ create_src_section(elog_data, pel_buffer, &pel_offset);
+ create_extended_header_section(elog_data, pel_buffer, &pel_offset);
+ create_mtms_section(elog_data, pel_buffer, &pel_offset);
+ if (elog_data->user_section_count)
+ create_user_defined_section(elog_data, pel_buffer, &pel_offset);
+
+ return pel_offset;
+}
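+
+/*
+ * The resulting PEL image is laid out, in order: private header, user
+ * header, primary SRC, extended header, MTMS, then zero or more
+ * user-defined sections -- matching the section_count of 5 set in
+ * create_private_header_section() plus one per user dump section.
+ */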
+
+/* Pre-allocate memory for writing error log to FSP */
+static int init_elog_write_free_list(uint32_t num_entries)
+{
+ struct opal_errorlog *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct opal_errorlog) * num_entries);
+ if (!entry)
+ goto out_err;
+
+ for (i = 0; i < num_entries; ++i) {
+ list_add_tail(&elog_write_free, &entry->link);
+ entry++;
+ }
+
+ /* Pre-allocate one single buffer for PANIC path */
+ panic_write_buffer = zalloc(sizeof(struct opal_errorlog));
+ if (!panic_write_buffer)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/* FSP elog write init function */
+void fsp_elog_write_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ elog_panic_write_buffer = memalign(TCE_PSIZE,
+ ELOG_PANIC_WRITE_BUFFER_SIZE);
+ if (!elog_panic_write_buffer) {
+ prerror("FSP: could not allocate ELOG_PANIC_WRITE_BUFFER!\n");
+ return;
+ }
+
+ elog_write_buffer = memalign(TCE_PSIZE, ELOG_WRITE_BUFFER_SIZE);
+ if (!elog_write_buffer) {
+ prerror("FSP: could not allocate ELOG_WRITE_BUFFER!\n");
+ return;
+ }
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_ELOG_PANIC_WRITE_BUF, elog_panic_write_buffer,
+ PSI_DMA_ELOG_PANIC_WRITE_BUF_SZ);
+
+ fsp_tce_map(PSI_DMA_ERRLOG_WRITE_BUF, elog_write_buffer,
+ PSI_DMA_ERRLOG_WRITE_BUF_SZ);
+
+ /* Pre-allocate memory for ELOG_WRITE_MAX_RECORD (64) records */
+ if (init_elog_write_free_list(ELOG_WRITE_MAX_RECORD)) {
+ prerror("ELOG: Cannot allocate WRITE buffers to log errors!\n");
+ return;
+ }
+
+ /* Register the OPAL interface */
+ opal_register(OPAL_ELOG_SEND, opal_commit_log_to_fsp, 1);
+}
diff --git a/hw/fsp/fsp-leds.c b/hw/fsp/fsp-leds.c
new file mode 100644
index 0000000..69b0583
--- /dev/null
+++ b/hw/fsp/fsp-leds.c
@@ -0,0 +1,1080 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * LED location code and indicator handling
+ */
+#include <skiboot.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <console.h>
+#include <timebase.h>
+#include <device.h>
+#include <fsp-leds.h>
+#include <stdio.h>
+#include <spcn.h>
+#include <hdata/spira.h>
+#include <hdata/hdata.h>
+#include <fsp-elog.h>
+
+/* Debug prefix */
+#define PREFIX "FSPLED: "
+
+#define buf_write(p, type, val) do { *(type *)(p) = val;\
+ p += sizeof(type); } while(0)
+#define buf_read(p, type, addr) do { *addr = *(type *)(p);\
+ p += sizeof(type); } while(0)
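+
+/*
+ * Example usage (a sketch based on fsp_msg_set_led_state() and
+ * fsp_process_leds_data() below). Both macros advance the buffer
+ * pointer as a side effect, so mixed-width fields can be packed and
+ * unpacked sequentially:
+ *
+ *	void *p = led_buffer;
+ *	buf_write(p, u8, sled.lc_len);		// location code length
+ *	buf_write(p, u16, sled.state);		// LED state
+ *	...
+ *	buf_read(p, u16, &led_data->rid);	// resource ID
+ */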
+
+//#define DBG(fmt...) do { printf(PREFIX fmt); } while(0)
+#define DBG(fmt...) do { } while(0)
+
+/* SPCN replay threshold */
+#define SPCN_REPLAY_THRESHOLD 2
+
+/* Sapphire LED support */
+static bool led_support;
+
+/*
+ * PSI mapped buffer for LED data
+ *
+ * Mapped once and never unmapped. Used for fetching all
+ * available LED information and creating the list. Also
+ * used for setting individual LED state.
+ *
+ */
+static void *led_buffer;
+
+/*
+ * Maintain lists of all LEDs
+ *
+ * The contents here are used to serve requests from FSP
+ * async commands and HV-initiated OPAL calls.
+ */
+static struct list_head cec_ledq; /* CEC LED list */
+static struct list_head encl_ledq; /* Enclosure LED list */
+
+/* LED lock */
+static struct lock led_lock = LOCK_UNLOCKED;
+
+/* Last SPCN command */
+static u32 last_spcn_cmd;
+static int replay = 0;
+
+
+static void fsp_leds_query_spcn(void);
+static void fsp_read_leds_data_complete(struct fsp_msg *msg);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_SPCN, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_BUFF, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_LC, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_STATE, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LED_SUPPORT, OPAL_PLATFORM_ERR_EVT, OPAL_LED,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA, NULL);
+
+/* Find a descendant LED record by CEC location code in the CEC list */
+static struct fsp_led_data *fsp_find_cec_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find an encl LED record by ENCL location code in the ENCL list */
+static struct fsp_led_data *fsp_find_encl_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find an encl LED record by CEC location code in the CEC list */
+static struct fsp_led_data *fsp_find_encl_cec_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strstr(led->loc_code, "-"))
+ continue;
+ if (!strstr(loc_code, led->loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Find an encl LED record by CEC location code in the ENCL list */
+static struct fsp_led_data *fsp_find_encl_encl_led(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ if (!strstr(loc_code, led->loc_code))
+ continue;
+ return led;
+ }
+ return NULL;
+}
+
+/* Compute the ENCL LED status in CEC list */
+static void compute_encl_status_cec(struct fsp_led_data *encl_led)
+{
+ struct fsp_led_data *led, *next;
+
+ encl_led->status &= ~SPCN_LED_IDENTIFY_MASK;
+ encl_led->status &= ~SPCN_LED_FAULT_MASK;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (!strstr(led->loc_code, encl_led->loc_code))
+ continue;
+
+ /* Don't count the enclosure LED itself */
+ if (!strcmp(led->loc_code, encl_led->loc_code))
+ continue;
+
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ encl_led->status |= SPCN_LED_IDENTIFY_MASK;
+
+ if (led->status & SPCN_LED_FAULT_MASK)
+ encl_led->status |= SPCN_LED_FAULT_MASK;
+ }
+}
+
+/* Is this an enclosure LED? */
+static bool is_enclosure_led(char *loc_code)
+{
+ if (strstr(loc_code, "-"))
+ return false;
+ if (!fsp_find_cec_led(loc_code) || !fsp_find_encl_led(loc_code))
+ return false;
+ return true;
+}
+
+/*
+ * Update both local LED lists to reflect the LED state changes
+ * made by the most recent SPCN command. Subsequent LED requests
+ * will be served from these updated lists.
+ */
+static void update_led_list(char *loc_code, u32 led_state)
+{
+ struct fsp_led_data *led = NULL, *encl_led = NULL, *encl_cec_led = NULL;
+ bool is_encl_led = is_enclosure_led(loc_code);
+
+ if (is_encl_led)
+ goto enclosure;
+
+ /* Descendant LED in CEC list */
+ led = fsp_find_cec_led(loc_code);
+ if (!led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find descendent LED in CEC LC=%s\n",
+ loc_code);
+ return;
+ }
+ led->status = led_state;
+
+enclosure:
+ /* Enclosure LED in CEC list */
+ encl_cec_led = fsp_find_encl_cec_led(loc_code);
+ if (!encl_cec_led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find enclosure LED in CEC LC=%s\n",
+ loc_code);
+ return;
+ }
+
+ /* Enclosure LED in ENCL list */
+ encl_led = fsp_find_encl_encl_led(loc_code);
+ if (!encl_led) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find enclosure LED in ENCL LC=%s\n",
+ loc_code);
+ return;
+ }
+
+ /* Compute the descendants' rolled-up status */
+ compute_encl_status_cec(encl_cec_led);
+
+ /* Check whether the exclusive bits are set */
+ if (encl_cec_led->excl_bit & FSP_LED_EXCL_FAULT)
+ encl_cec_led->status |= SPCN_LED_FAULT_MASK;
+
+ if (encl_cec_led->excl_bit & FSP_LED_EXCL_IDENTIFY)
+ encl_cec_led->status |= SPCN_LED_IDENTIFY_MASK;
+
+ /* Copy over */
+ encl_led->status = encl_cec_led->status;
+ encl_led->excl_bit = encl_cec_led->excl_bit;
+}
+
+static void fsp_spcn_set_led_completion(struct fsp_msg *msg)
+{
+ bool fail;
+ u16 ckpt_status;
+ char loc_code[LOC_CODE_SIZE + 1];
+ struct fsp_msg *resp = msg->resp;
+ u32 cmd = FSP_RSP_SET_LED_STATE;
+ u8 status = resp->word1 & 0xff00;
+
+ /*
+ * LED state update request came as part of FSP async message
+ * FSP_CMD_SET_LED_STATE, hence need to send response message.
+ */
+ fail = (status == FSP_STATUS_INVALID_DATA) ||
+ (status == FSP_STATUS_DMA_ERROR) ||
+ (status == FSP_STATUS_SPCN_ERROR);
+
+ /* SPCN command failed: Identify the command and roll back changes */
+ if (fail) {
+ log_simple_error(&e_info(OPAL_RC_LED_SPCN),
+ "LED: Last SPCN command failed, status=%02x\n",
+ status);
+ cmd |= FSP_STATUS_GENERIC_ERROR;
+
+ /* Identify the failed command */
+ memset(loc_code, 0, sizeof(loc_code));
+ strncpy(loc_code,
+ ((struct fsp_led_data *)(msg->user_data))->loc_code,
+ LOC_CODE_SIZE);
+ ckpt_status = ((struct fsp_led_data *)(msg->user_data))
+ ->ckpt_status;
+
+ /* Rollback the changes */
+ update_led_list(loc_code, ckpt_status);
+ }
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+}
+
+/*
+ * Set the state of the LED pointed to by the location code
+ *
+ * LED command: FAULT state or IDENTIFY state
+ * LED state : OFF (reset) or ON (set)
+ *
+ * SPCN TCE mapped buffer entries for setting LED state
+ *
+ * struct spcn_led_data {
+ * u8 lc_len;
+ * u16 state;
+ * char lc_code[LOC_CODE_SIZE];
+ * };
+ */
+static int fsp_msg_set_led_state(char *loc_code, bool command, bool state)
+{
+ struct spcn_led_data sled;
+ struct fsp_msg *msg = NULL;
+ struct fsp_led_data *led = NULL;
+ void *buf = led_buffer;
+ u16 data_len = 0;
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ sled.lc_len = strlen(loc_code);
+ strncpy(sled.lc_code, loc_code, sled.lc_len);
+
+ /* Location code length + Location code + LED control */
+ data_len = LOC_CODE_LEN + sled.lc_len + LED_CONTROL_LEN;
+ cmd_hdr = SPCN_MOD_SET_LED_CTL_LOC_CODE << 24 | SPCN_CMD_SET << 16 |
+ data_len;
+
+ /* Fetch the current state of LED */
+ led = fsp_find_cec_led(loc_code);
+
+ /* LED not present */
+ if (led == NULL) {
+ u32 cmd = 0;
+ int rc = -1;
+
+ cmd = FSP_RSP_SET_LED_STATE | FSP_STATUS_INVALID_LC;
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ return rc;
+ }
+
+ /*
+ * Checkpoint the status here, will use it if the SPCN
+ * command eventually fails.
+ */
+ led->ckpt_status = led->status;
+ sled.state = led->status;
+
+ /* Update the exclusive LED bits */
+ if (is_enclosure_led(loc_code)) {
+ if (command == LED_COMMAND_FAULT) {
+ if (state == LED_STATE_ON)
+ led->excl_bit |= FSP_LED_EXCL_FAULT;
+ if (state == LED_STATE_OFF)
+ led->excl_bit &= ~FSP_LED_EXCL_FAULT;
+ }
+
+ if (command == LED_COMMAND_IDENTIFY) {
+ if (state == LED_STATE_ON)
+ led->excl_bit |= FSP_LED_EXCL_IDENTIFY;
+ if (state == LED_STATE_OFF)
+ led->excl_bit &= ~FSP_LED_EXCL_IDENTIFY;
+ }
+ }
+
+ /* LED FAULT command */
+ if (command == LED_COMMAND_FAULT) {
+ if (state == LED_STATE_ON)
+ sled.state |= SPCN_LED_FAULT_MASK;
+ if (state == LED_STATE_OFF)
+ sled.state &= ~SPCN_LED_FAULT_MASK;
+ }
+
+ /* LED IDENTIFY command */
+ if (command == LED_COMMAND_IDENTIFY) {
+ if (state == LED_STATE_ON)
+ sled.state |= SPCN_LED_IDENTIFY_MASK;
+ if (state == LED_STATE_OFF)
+ sled.state &= ~SPCN_LED_IDENTIFY_MASK;
+ }
+
+ /* Write into SPCN TCE buffer */
+ buf_write(buf, u8, sled.lc_len); /* Location code length */
+ strncpy(buf, sled.lc_code, sled.lc_len); /* Location code */
+ buf += sled.lc_len;
+ buf_write(buf, u16, sled.state); /* LED state */
+
+ msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0, PSI_DMA_LED_BUF);
+ /*
+ * Update the local lists based on the attempted SPCN command to
+ * set/reset an individual led (CEC or ENCL).
+ */
+ lock(&led_lock);
+ update_led_list(loc_code, sled.state);
+ msg->user_data = led;
+ unlock(&led_lock);
+
+ rc = fsp_queue_msg(msg, fsp_spcn_set_led_completion);
+ return rc;
+}
+
+/*
+ * Write single location code information into the TCE outbound buffer
+ *
+ * Data layout
+ *
+ * 2 bytes - Length of location code structure
+ * 4 bytes - CCIN in ASCII
+ * 1 byte - Resource status flag
+ * 1 byte - Indicator state
+ * 1 byte - Raw loc code length
+ * 1 byte - Loc code field size
+ * Field size byte - Null terminated ASCII string padded to 4 byte boundary
+ *
+ */
+static u32 fsp_push_data_to_tce(struct fsp_led_data *led, u8 *out_data,
+ u32 total_size)
+{
+ struct fsp_loc_code_data lcode;
+
+ /* CCIN value is irrelevant */
+ lcode.ccin = 0x0;
+
+ lcode.status = FSP_IND_NOT_IMPLMNTD;
+
+ if (led->parms & SPCN_LED_IDENTIFY_MASK)
+ lcode.status = FSP_IND_IMPLMNTD;
+
+ /* LED indicator status */
+ lcode.ind_state = FSP_IND_INACTIVE;
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ lcode.ind_state |= FSP_IND_IDENTIFY_ACTV;
+ if (led->status & SPCN_LED_FAULT_MASK)
+ lcode.ind_state |= FSP_IND_FAULT_ACTV;
+
+ /* Location code */
+ memset(lcode.loc_code, 0, LOC_CODE_SIZE);
+ lcode.raw_len = strlen(led->loc_code);
+ strncpy(lcode.loc_code, led->loc_code, lcode.raw_len);
+ lcode.fld_sz = sizeof(lcode.loc_code);
+
+ /* Rest of the structure */
+ lcode.size = sizeof(lcode);
+ lcode.status &= 0x0f;
+
+ /*
+ * Check for outbound buffer overflow. If there are still more
+ * LEDs to be sent across to the FSP, don't send this one; it
+ * is silently ignored.
+ */
+ if ((total_size + lcode.size) > PSI_DMA_LOC_COD_BUF_SZ)
+ return 0;
+
+ /* Copy over to the buffer */
+ memcpy(out_data, &lcode, sizeof(lcode));
+
+ return lcode.size;
+}
+
+/*
+ * Send out LED information structure pointed by "loc_code"
+ * to FSP through the PSI DMA mapping. Buffer layout structure
+ * must be followed.
+ */
+static void fsp_ret_loc_code_list(u16 req_type, char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+
+ u8 *data; /* Start of TCE mapped buffer */
+ u8 *out_data; /* Start of location code data */
+ u32 bytes_sent = 0, total_size = 0;
+ u16 header_size = 0, flags = 0;
+
+ /* Init the addresses */
+ data = (u8 *) PSI_DMA_LOC_COD_BUF;
+ out_data = NULL;
+
+ /* Mapped here; unmapped later via the FSP_CMD_RET_LOC_BUFFER command */
+ fsp_tce_map(PSI_DMA_LOC_COD_BUF, (void*)data, PSI_DMA_LOC_COD_BUF_SZ);
+ out_data = data + 8;
+
+ /* CEC LED list */
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ /*
+ * When the request type is the system wide LED list,
+ * i.e. GET_LC_CMPLT_SYS, send the entire contents of
+ * the CEC list, including all descendants and all of
+ * their enclosures.
+ */
+
+ if (req_type == GET_LC_ENCLOSURES)
+ break;
+
+ if (req_type == GET_LC_ENCL_DESCENDANTS) {
+ if (strstr(led->loc_code, loc_code) == NULL)
+ continue;
+ }
+
+ if (req_type == GET_LC_SINGLE_LOC_CODE) {
+ if (strcmp(led->loc_code, loc_code))
+ continue;
+ }
+
+ /* Push the data into TCE buffer */
+ bytes_sent = 0;
+ bytes_sent = fsp_push_data_to_tce(led, out_data, total_size);
+
+ /* Advance the TCE pointer */
+ out_data += bytes_sent;
+ total_size += bytes_sent;
+ }
+
+ /* Enclosure LED list */
+ if (req_type == GET_LC_ENCLOSURES) {
+ list_for_each_safe(&encl_ledq, led, next, link) {
+
+ /* Push the data into TCE buffer */
+ bytes_sent = 0;
+ bytes_sent = fsp_push_data_to_tce(led,
+ out_data, total_size);
+
+ /* Advance the TCE pointer */
+ out_data += bytes_sent;
+ total_size += bytes_sent;
+ }
+ }
+
+ /* Count from 'data' instead of 'out_data' */
+ total_size += 8;
+ memcpy(data, &total_size, sizeof(total_size));
+
+ header_size = OUTBUF_HEADER_SIZE;
+ memcpy(data + sizeof(total_size), &header_size, sizeof(header_size));
+
+ if (req_type == GET_LC_ENCL_DESCENDANTS)
+ flags = 0x8000;
+
+ memcpy(data + sizeof(total_size) + sizeof(header_size), &flags,
+ sizeof(flags));
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_LIST,
+ 3, 0, PSI_DMA_LOC_COD_BUF, total_size),
+ fsp_freemsg);
+}
+
+/*
+ * FSP async command: FSP_CMD_GET_LED_LIST
+ *
+ * (1) FSP sends the list of location codes through inbound buffer
+ * (2) HV sends the status of those location codes through outbound buffer
+ *
+ * Inbound buffer data layout (loc code request structure)
+ *
+ * 2 bytes - Length of entire structure
+ * 2 bytes - Request type
+ * 1 byte - Raw length of location code
+ * 1 byte - Location code field size
+ * `Field size` bytes - NULL terminated ASCII location code string
+ */
+void fsp_get_led_list(struct fsp_msg *msg)
+{
+ struct fsp_loc_code_req req;
+ u32 tce_token = msg->data.words[1];
+ void *buf;
+
+ /* Parse inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_LIST |
+ FSP_STATUS_INVALID_DATA,
+ 0), fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ printf(PREFIX "Request for loc code list type 0x%04x LC=%s\n",
+ req.req_type, req.loc_code);
+
+ fsp_ret_loc_code_list(req.req_type, req.loc_code);
+}
+
+/*
+ * FSP async command: FSP_CMD_RET_LOC_BUFFER
+ *
+ * With this command FSP returns ownership of the outbound buffer
+ * used by Sapphire to pass the indicator list the previous time. That
+ * way FSP tells Sapphire that it has consumed all the data present
+ * on the outbound buffer and Sapphire can reuse it for next request.
+ */
+void fsp_free_led_list_buf(struct fsp_msg *msg)
+{
+ u32 tce_token = msg->data.words[1];
+ u32 cmd = FSP_RSP_RET_LED_BUFFER;
+
+ /* Token does not point to outbound buffer */
+ if (tce_token != PSI_DMA_LOC_COD_BUF) {
+ log_simple_error(&e_info(OPAL_RC_LED_BUFF),
+ "LED: Invalid tce token from FSP\n");
+ cmd |= FSP_STATUS_GENERIC_ERROR;
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+ return;
+ }
+
+ /* Unmap the location code DMA buffer */
+ fsp_tce_unmap(PSI_DMA_LOC_COD_BUF, PSI_DMA_LOC_COD_BUF_SZ);
+
+ /* Respond to the FSP */
+ fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg);
+}
+
+static void fsp_ret_led_state(char *loc_code)
+{
+ struct fsp_led_data *led, *next;
+ u8 ind_state = 0;
+
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ if (strcmp(loc_code, led->loc_code))
+ continue;
+
+ /* Found the location code */
+ if (led->status & SPCN_LED_IDENTIFY_MASK)
+ ind_state |= FSP_IND_IDENTIFY_ACTV;
+ if (led->status & SPCN_LED_FAULT_MASK)
+ ind_state |= FSP_IND_FAULT_ACTV;
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE, 1, ind_state),
+ fsp_freemsg);
+ return;
+ }
+
+ /* Location code not found */
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Could not find the location code LC=%s\n", loc_code);
+
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE |
+ FSP_STATUS_INVALID_LC, 1, 0xff), fsp_freemsg);
+}
+
+/*
+ * FSP async command: FSP_CMD_GET_LED_STATE
+ *
+ * With this command the FSP queries the state of any given LED
+ */
+void fsp_get_led_state(struct fsp_msg *msg)
+{
+ struct fsp_get_ind_state_req req;
+ u32 tce_token = msg->data.words[1];
+ void *buf;
+
+ /* Parse the inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_GET_LED_STATE |
+ FSP_STATUS_INVALID_DATA, 0),
+ fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ DBG("%s: tce=0x%08x buf=%p rq.sz=%d rq.lc_len=%d rq.fld_sz=%d"
+ " LC: %02x %02x %02x %02x....\n", __func__,
+ tce_token, buf, req.size, req.lc_len, req.fld_sz,
+ req.loc_code[0], req.loc_code[1],
+ req.loc_code[2], req.loc_code[3]);
+
+ /* Bound check */
+ if (req.lc_len >= LOC_CODE_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Loc code too large in %s: %d bytes\n",
+ __func__, req.lc_len);
+ req.lc_len = LOC_CODE_SIZE - 1;
+ }
+ /* Ensure NULL termination */
+ req.loc_code[req.lc_len] = 0;
+
+ /* Do the deed */
+ fsp_ret_led_state(req.loc_code);
+}
+
+/*
+ * FSP async command: FSP_CMD_SET_LED_STATE
+ *
+ * With this command the FSP sets/resets the state of any given LED
+ */
+void fsp_set_led_state(struct fsp_msg *msg)
+{
+ struct fsp_set_ind_state_req req;
+ struct fsp_led_data *led, *next;
+ u32 tce_token = msg->data.words[1];
+ bool command, state;
+ void *buf;
+
+ /* Parse the inbound buffer */
+ buf = fsp_inbound_buf_from_tce(tce_token);
+ if (!buf) {
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_SET_LED_STATE |
+ FSP_STATUS_INVALID_DATA,
+ 0), fsp_freemsg);
+ return;
+ }
+ memcpy(&req, buf, sizeof(req));
+
+ DBG("%s: tce=0x%08x buf=%p rq.sz=%d rq.typ=0x%04x rq.lc_len=%d"
+ " rq.fld_sz=%d LC: %02x %02x %02x %02x....\n", __func__,
+ tce_token, buf, req.size, req.lc_len, req.fld_sz,
+ req.req_type,
+ req.loc_code[0], req.loc_code[1],
+ req.loc_code[2], req.loc_code[3]);
+
+ /* Bound check */
+ if (req.lc_len >= LOC_CODE_SIZE) {
+ log_simple_error(&e_info(OPAL_RC_LED_LC),
+ "LED: Loc code too large in %s: %d bytes\n",
+ __func__, req.lc_len);
+ req.lc_len = LOC_CODE_SIZE - 1;
+ }
+ /* Ensure NULL termination */
+ req.loc_code[req.lc_len] = 0;
+
+ /* Decode command */
+ command = (req.ind_state & LOGICAL_IND_STATE_MASK) ?
+ LED_COMMAND_FAULT : LED_COMMAND_IDENTIFY;
+ state = (req.ind_state & ACTIVE_LED_STATE_MASK) ?
+ LED_STATE_ON : LED_STATE_OFF;
+
+ /* Handle requests */
+ switch(req.req_type) {
+ case SET_IND_ENCLOSURE:
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ /* Only descendants of the same enclosure */
+ if (!strstr(led->loc_code, req.loc_code))
+ continue;
+
+ /* Skip the enclosure */
+ if (!strcmp(led->loc_code, req.loc_code))
+ continue;
+
+ if (fsp_msg_set_led_state(led->loc_code,
+ command, state))
+ log_simple_error(&e_info(OPAL_RC_LED_STATE),
+ "LED: Set led state failed at LC=%s\n",
+ led->loc_code);
+ }
+ break;
+ case SET_IND_SINGLE_LOC_CODE:
+ /* Set LED state for a single descendant LED */
+ if (fsp_msg_set_led_state(req.loc_code, command, state))
+ log_simple_error(&e_info(OPAL_RC_LED_STATE),
+ "LED: Set led state failed at LC=%s\n",
+ req.loc_code);
+ break;
+ default:
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_SET_LED_STATE |
+ FSP_STATUS_NOT_SUPPORTED, 0),
+ fsp_freemsg);
+ }
+}
+
+/* Handle received indicator message from FSP */
+static bool fsp_indicator_message(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ /* LED support not available yet */
+ if (!led_support) {
+ log_simple_error(&e_info(OPAL_RC_LED_SUPPORT),
+ PREFIX "Indicator message while LED support not"
+ " available yet\n");
+ return false;
+ }
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_GET_LED_LIST:
+ printf(PREFIX
+ "FSP_CMD_GET_LED_LIST command received\n");
+ fsp_get_led_list(msg);
+ return true;
+ case FSP_CMD_RET_LED_BUFFER:
+ printf(PREFIX
+ "FSP_CMD_RET_LED_BUFFER command received\n");
+ fsp_free_led_list_buf(msg);
+ return true;
+ case FSP_CMD_GET_LED_STATE:
+ printf(PREFIX
+ "FSP_CMD_GET_LED_STATE command received\n");
+ fsp_get_led_state(msg);
+ return true;
+ case FSP_CMD_SET_LED_STATE:
+ printf(PREFIX
+ "FSP_CMD_SET_LED_STATE command received\n");
+ fsp_set_led_state(msg);
+ return true;
+ default:
+ printf(PREFIX
+ "Invalid FSP async sub command %06x\n",
+ cmd_sub_mod);
+ return false;
+ }
+}
+
+/* Indicator class client */
+static struct fsp_client fsp_indicator_client = {
+ .message = fsp_indicator_message,
+};
+
+/*
+ * Process the received LED data from SPCN
+ *
+ * Every LED state record is added to the CEC list. If the location
+ * code is of an enclosure type, it is added to the enclosure list as well.
+ *
+ */
+static void fsp_process_leds_data(u16 len)
+{
+ struct fsp_led_data *led_data = NULL;
+ void *buf = NULL;
+
+ /*
+ * Process the entire captured data from the last command
+ *
+ * The TCE mapped 'led_buffer' contains fsp_led_data structures,
+ * one after the other, up to the total length 'len'.
+ */
+ buf = led_buffer;
+ while (len) {
+ /* Prepare */
+ led_data = zalloc(sizeof(struct fsp_led_data));
+ assert(led_data);
+
+ /* Resource ID */
+ buf_read(buf, u16, &led_data->rid);
+ len -= sizeof(led_data->rid);
+
+ /* Location code length */
+ buf_read(buf, u8, &led_data->lc_len);
+ len -= sizeof(led_data->lc_len);
+
+ if (led_data->lc_len == 0) {
+ free(led_data);
+ break;
+ }
+
+ /* Location code */
+ strncpy(led_data->loc_code, buf, led_data->lc_len);
+ led_data->loc_code[led_data->lc_len] = '\0';
+
+ buf += led_data->lc_len;
+ len -= led_data->lc_len;
+
+ /* Parameters */
+ buf_read(buf, u16, &led_data->parms);
+ len -= sizeof(led_data->parms);
+
+ /* Status */
+ buf_read(buf, u16, &led_data->status);
+ len -= sizeof(led_data->status);
+
+ /*
+ * This is an enclosure LED's location code; it needs to
+ * go into the enclosure LED list as well.
+ */
+ if (!strstr(led_data->loc_code, "-")) {
+ struct fsp_led_data *encl_led_data = NULL;
+ encl_led_data = zalloc(sizeof(struct fsp_led_data));
+ assert(encl_led_data);
+
+ /* copy over the original */
+ encl_led_data->rid = led_data->rid;
+ encl_led_data->lc_len = led_data->lc_len;
+ strncpy(encl_led_data->loc_code, led_data->loc_code,
+ led_data->lc_len);
+ encl_led_data->loc_code[led_data->lc_len] = '\0';
+ encl_led_data->parms = led_data->parms;
+ encl_led_data->status = led_data->status;
+
+ /* Add to the list of enclosure LEDs */
+ list_add_tail(&encl_ledq, &encl_led_data->link);
+ }
+
+ /* Push this onto the list */
+ list_add_tail(&cec_ledq, &led_data->link);
+ }
+}
+
+/* Replay the SPCN command */
+static void replay_spcn_cmd(u32 last_spcn_cmd)
+{
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ /* Reached threshold */
+ if (replay == SPCN_REPLAY_THRESHOLD) {
+ replay = 0;
+ return;
+ }
+
+ replay++;
+ if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_FIRST) {
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 |
+ SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE,
+ cmd_hdr, 0,
+ PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "Replay SPCN_MOD_PRS_LED_DATA_FIRST"
+ " command could not be queued\n");
+ }
+
+ if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_SUB) {
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 | SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr,
+ 0, PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "Replay SPCN_MOD_PRS_LED_DATA_SUB"
+ " command could not be queued\n");
+ }
+}
+
+/*
+ * FSP message response handler for following SPCN LED commands
+ * which are used to fetch all of the LED data from SPCN
+ *
+ * 1. SPCN_MOD_PRS_LED_DATA_FIRST --> First 1KB of LED data
+ * 2. SPCN_MOD_PRS_LED_DATA_SUB --> Subsequent 1KB of LED data
+ *
+ * Once the SPCN_RSP_STATUS_SUCCESS response code has been received
+ * indicating the last batch of 1KB LED data is here, the list addition
+ * process is now complete and we enable LED support for FSP async commands
+ * and for OPAL interface.
+ */
+static void fsp_read_leds_data_complete(struct fsp_msg *msg)
+{
+ struct fsp_led_data *led, *next;
+ struct fsp_msg *resp = msg->resp;
+ u32 cmd_hdr = 0;
+ int rc = 0;
+
+ u32 msg_status = resp->word1 & 0xff00;
+ u32 led_status = (resp->data.words[1] >> 24) & 0xff;
+ u16 data_len = (u16)(resp->data.words[1] & 0xffff);
+
+ if (msg_status != FSP_STATUS_SUCCESS) {
+ log_simple_error(&e_info(OPAL_RC_LED_SUPPORT),
+ "LED: FSP returned error %x LED not supported\n",
+ msg_status);
+ /* LED support not available */
+ led_support = false;
+ return;
+ }
+
+ /* SPCN command status */
+ switch (led_status) {
+ /* Last 1KB of LED data */
+ case SPCN_RSP_STATUS_SUCCESS:
+ printf(PREFIX
+ "SPCN_RSP_STATUS_SUCCESS: %d bytes received\n",
+ data_len);
+
+ /* Copy data to the local list */
+ fsp_process_leds_data(data_len);
+ led_support = true;
+
+ /* LEDs captured on the system */
+ printf(PREFIX "CEC LEDs captured on the system:\n");
+ list_for_each_safe(&cec_ledq, led, next, link) {
+ printf(PREFIX "rid: %x\t", led->rid);
+ printf("len: %x ", led->lc_len);
+ printf("lcode: %-30s\t", led->loc_code);
+ printf("parms: %04x\t", led->parms);
+ printf("status: %04x\n", led->status);
+ }
+
+ printf(PREFIX "ENCL LEDs captured on the system:\n");
+ list_for_each_safe(&encl_ledq, led, next, link) {
+ printf(PREFIX "rid: %x\t", led->rid);
+ printf("len: %x ", led->lc_len);
+ printf("lcode: %-30s\t", led->loc_code);
+ printf("parms: %04x\t", led->parms);
+ printf("status: %04x\n", led->status);
+ }
+
+ break;
+
+ /* More 1KB chunks of LED data remain */
+ case SPCN_RSP_STATUS_COND_SUCCESS:
+ printf(PREFIX
+ "SPCN_RSP_STATUS_COND_SUCCESS: %d bytes "
+ " received\n", data_len);
+
+ /* Copy data to the local list */
+ fsp_process_leds_data(data_len);
+
+ /* Fetch the remaining data from SPCN */
+ last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_SUB;
+ cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 |
+ SPCN_CMD_PRS << 16;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE,
+ cmd_hdr,
+ 0, PSI_DMA_LED_BUF),
+ fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "SPCN_MOD_PRS_LED_DATA_SUB command"
+ " could not be queued\n");
+ break;
+
+ /* Other expected error codes */
+ case SPCN_RSP_STATUS_INVALID_RACK:
+ case SPCN_RSP_STATUS_INVALID_SLAVE:
+ case SPCN_RSP_STATUS_INVALID_MOD:
+ case SPCN_RSP_STATUS_STATE_PROHIBIT:
+ case SPCN_RSP_STATUS_UNKNOWN:
+ /* Replay the previous SPCN command */
+ replay_spcn_cmd(last_spcn_cmd);
+ }
+ fsp_freemsg(msg);
+}
+
+/*
+ * Init the LED state
+ *
+ * This is called during the host boot process. This is where we
+ * discover all the LEDs present on the system and their state, build
+ * structures out of that information, and populate two master lists:
+ * one for all the LEDs on the CEC and one for all the LEDs on the
+ * enclosure. The LED information contained in these lists serves both
+ * FSP-initiated async commands and POWERNV-initiated OPAL calls. We
+ * need to make sure this initialization is complete before allowing
+ * any LED requests. This also needs to be called to re-fetch data
+ * from SPCN after any LED state has been updated.
+ */
+static void fsp_leds_query_spcn(void)
+{
+ struct fsp_led_data *led = NULL;
+ int rc = 0;
+
+ u32 cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 | SPCN_CMD_PRS << 16;
+
+ /* LED support stays disabled until the last batch of LED data arrives */
+ led_support = false;
+ last_spcn_cmd = 0;
+
+ /* Empty the lists */
+ while (!list_empty(&cec_ledq)) {
+ led = list_pop(&cec_ledq, struct fsp_led_data, link);
+ free(led);
+ }
+
+ while (!list_empty(&encl_ledq)) {
+ led = list_pop(&encl_ledq, struct fsp_led_data, link);
+ free(led);
+ }
+
+ /* Allocate buffer with alignment requirements */
+ if (led_buffer == NULL) {
+ led_buffer = memalign(TCE_PSIZE, PSI_DMA_LED_BUF_SZ);
+ if (!led_buffer)
+ return;
+ }
+
+ /* TCE mapping - will not unmap */
+ fsp_tce_map(PSI_DMA_LED_BUF, led_buffer, PSI_DMA_LED_BUF_SZ);
+
+ /* Request the first 1KB of LED data */
+ last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_FIRST;
+ rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0,
+ PSI_DMA_LED_BUF), fsp_read_leds_data_complete);
+ if (rc)
+ printf(PREFIX
+ "SPCN_MOD_PRS_LED_DATA_FIRST command could"
+ " not be queued\n");
+}
+
+/* Init the LED subsystem at boot time */
+void fsp_led_init(void)
+{
+ led_buffer = NULL;
+
+ /* Init the master lists */
+ list_head_init(&cec_ledq);
+ list_head_init(&encl_ledq);
+
+ fsp_leds_query_spcn();
+ printf(PREFIX "Init completed\n");
+
+ /* Handle FSP initiated async LED commands */
+ fsp_register_client(&fsp_indicator_client, FSP_MCLASS_INDICATOR);
+ printf(PREFIX "FSP async command client registered\n");
+}
diff --git a/hw/fsp/fsp-mdst-table.c b/hw/fsp/fsp-mdst-table.c
new file mode 100644
index 0000000..5b29948
--- /dev/null
+++ b/hw/fsp/fsp-mdst-table.c
@@ -0,0 +1,252 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * Sapphire dump design:
+ * - During initialization we setup Memory Dump Source Table (MDST) table
+ * which contains address, size pair.
+ * - We send MDST table update notification to FSP via MBOX command.
+ * - During Sapphire checkstop:
+ * - FSP retrieves HWDUMP.
+ * - FSP retrieves CEC memory based on MDST table.
+ * - Once Sapphire reboots, the FSP sends a new dump available notification
+ *   via HDAT.
+ */
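+
+/*
+ * Each MDST entry pairs a DMA-visible address with a size; a sketch of
+ * the fields used below (the authoritative layout lives in
+ * fsp-mdst-table.h):
+ *
+ *   addr - TCE-mapped address the FSP reads from (PSI_DMA_HYP_DUMP + offset)
+ *   type - dump section identifier (e.g. DUMP_SECTION_CONSOLE)
+ *   size - number of bytes to capture
+ */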
+
+#include <fsp.h>
+#include <psi.h>
+#include <opal.h>
+#include <lock.h>
+#include <skiboot.h>
+#include <fsp-elog.h>
+#include <fsp-mdst-table.h>
+
+/*
+ * Sapphire dump size
+ * This is the maximum memory that FSP can retrieve during checkstop.
+ *
+ * Note:
+ * Presently we are hardcoding this parameter. Eventually we need a
+ * new system parameter so that we can get the max size dynamically.
+ */
+#define MAX_SAPPHIRE_DUMP_SIZE 0x1000000
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_UPDATE, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+
+static struct dump_mdst_table *mdst_table;
+
+static int cur_mdst_entry;
+static int max_mdst_entry;
+static int cur_dump_size;
+/*
+ * Presently both sizes are the same, but if someday the FSP gives more
+ * space than our TCE mapping then we need this validation.
+ *
+ * Also, once the FSP implements the MAX_SAPPHIRE_DUMP_SIZE system param,
+ * we can move this validation to a separate function.
+ */
+static int max_dump_size = MIN(MAX_SAPPHIRE_DUMP_SIZE, PSI_DMA_HYP_DUMP_SIZE);
+
+/* Protect MDST table entries */
+static struct lock mdst_lock = LOCK_UNLOCKED;
+
+/* Not supported on P7 */
+static inline bool fsp_mdst_supported(void)
+{
+ return proc_gen >= proc_gen_p8;
+}
+
+static void update_mdst_table_complete(struct fsp_msg *msg)
+{
+ uint8_t status = (msg->resp->word1 >> 8) & 0xff;
+
+ if (status)
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+ "MDST: MDST table update failed: 0x%x\n",
+ status);
+ else
+ printf("MDST: Table updated.\n");
+
+ fsp_freemsg(msg);
+}
+
+/* Send MDST table to FSP */
+static int64_t fsp_update_mdst_table(void)
+{
+ struct fsp_msg *msg;
+ int rc = OPAL_SUCCESS;
+
+ if (cur_mdst_entry <= 0) {
+ printf("MDST: Table is empty\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ lock(&mdst_lock);
+ msg = fsp_mkmsg(FSP_CMD_HYP_MDST_TABLE, 4, 0,
+ PSI_DMA_MDST_TABLE,
+ sizeof(*mdst_table) * cur_mdst_entry,
+ sizeof(*mdst_table));
+ unlock(&mdst_lock);
+
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+ "MDST: Message allocation failed.!\n");
+ rc = OPAL_INTERNAL_ERROR;
+ } else if (fsp_queue_msg(msg, update_mdst_table_complete)) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE),
+ "MDST: Failed to queue MDST table message.\n");
+ fsp_freemsg(msg);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+ return rc;
+}
+
+/* Add entry to MDST table */
+static int __mdst_table_add_entry(void *addr, uint32_t type, uint32_t size)
+{
+ int rc = OPAL_INTERNAL_ERROR;
+
+ lock(&mdst_lock);
+
+ if (!mdst_table)
+ goto out;
+
+ if (cur_mdst_entry >= max_mdst_entry) {
+ printf("MDST: Table is full.\n");
+ goto out;
+ }
+
+ /* Make sure we don't cross dump size limit */
+ if (cur_dump_size + size > max_dump_size) {
+ printf("MDST: %d is crossing max dump size (%d) limit.\n",
+ cur_dump_size + size, max_dump_size);
+ goto out;
+ }
+
+ /* TCE mapping */
+ fsp_tce_map(PSI_DMA_HYP_DUMP + cur_dump_size, addr, ALIGN_UP(size, TCE_PSIZE));
+
+ /* Add entry to MDST table */
+ mdst_table[cur_mdst_entry].addr = PSI_DMA_HYP_DUMP + cur_dump_size;
+ mdst_table[cur_mdst_entry].type = type;
+ mdst_table[cur_mdst_entry].size = size;
+
+ /* Update MDST count and dump size */
+ cur_mdst_entry++;
+ cur_dump_size += ALIGN_UP(size, TCE_PSIZE);
+
+ printf("MDST: Addr = 0x%llx [size : %d bytes] added to MDST table.\n",
+ (uint64_t)addr, size);
+
+ rc = OPAL_SUCCESS;
+
+out:
+ unlock(&mdst_lock);
+ return rc;
+}
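+
+/*
+ * Example with illustrative numbers: assuming TCE_PSIZE is 4K, adding a
+ * 5000-byte region maps ALIGN_UP(5000, 4K) = 8192 bytes of TCE space and
+ * advances cur_dump_size by 8192, while the MDST entry itself records the
+ * exact 5000-byte size for the FSP.
+ */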
+
+static int mdst_table_add_entries(void)
+{
+ int rc;
+
+ /* Add console buffer */
+ rc = __mdst_table_add_entry((void *)INMEM_CON_START,
+ DUMP_SECTION_CONSOLE, INMEM_CON_LEN);
+ if (rc)
+ return rc;
+
+ /* Add HBRT buffer */
+ rc = __mdst_table_add_entry((void *)HBRT_CON_START,
+ DUMP_SECTION_HBRT_LOG, HBRT_CON_LEN);
+
+ return rc;
+}
+
+/* TCE mapping */
+static inline void mdst_table_tce_map(void)
+{
+ fsp_tce_map(PSI_DMA_MDST_TABLE, mdst_table, PSI_DMA_MDST_TABLE_SIZE);
+}
+
+/* Initialize MDST table */
+static int mdst_table_init(void)
+{
+ max_mdst_entry = PSI_DMA_MDST_TABLE_SIZE / sizeof(*mdst_table);
+ printf("MDST: Max entries in MDST table : %d\n", max_mdst_entry);
+
+ mdst_table = memalign(TCE_PSIZE, PSI_DMA_MDST_TABLE_SIZE);
+ if (!mdst_table) {
+ log_simple_error(&e_info(OPAL_RC_DUMP_MDST_INIT),
+ "MDST: Failed to allocate memory for MDST table.\n");
+ return -ENOMEM;
+ }
+
+ memset(mdst_table, 0, PSI_DMA_MDST_TABLE_SIZE);
+ mdst_table_tce_map();
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * Handle FSP R/R event.
+ */
+static bool fsp_mdst_update_rr(uint32_t cmd_sub_mod,
+ struct fsp_msg *msg __unused)
+{
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ return true;
+ case FSP_RELOAD_COMPLETE: /* Send MDST to FSP */
+ fsp_update_mdst_table();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_mdst_client_rr = {
+ .message = fsp_mdst_update_rr,
+};
+
+/* Initialize MDST table and send notification to FSP */
+void fsp_mdst_table_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ if (!fsp_mdst_supported())
+ return;
+
+ /* Initiate MDST */
+ if (mdst_table_init() != OPAL_SUCCESS)
+ return;
+
+ /*
+	 * Ignore the return code from mdst_table_add_entries so that
+	 * we can at least capture a partial dump.
+ */
+ mdst_table_add_entries();
+ fsp_update_mdst_table();
+
+ /* Register for Class AA (FSP R/R) */
+ fsp_register_client(&fsp_mdst_client_rr, FSP_MCLASS_RR_EVENT);
+}
diff --git a/hw/fsp/fsp-mem-err.c b/hw/fsp/fsp-mem-err.c
new file mode 100644
index 0000000..8ebaaee
--- /dev/null
+++ b/hw/fsp/fsp-mem-err.c
@@ -0,0 +1,415 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <opal.h>
+#include <opal-msg.h>
+#include <lock.h>
+#include <fsp.h>
+#include <fsp-elog.h>
+
+/* debug message prefix */
+#define PREFIX "FSPMEMERR: "
+
+/* FSP sends real address of 4K memory page. */
+#define MEM_ERR_PAGE_SIZE_4K (1UL << 12)
+
+/* Maximum number of error events to hold until Linux consumes them. */
+#define MERR_MAX_RECORD 1024
+
+/* FSP response status */
+#define FSP_RESP_STATUS_GENERIC_FAILURE 0xfe
+
+struct fsp_mem_err_node {
+ struct list_node list;
+ struct OpalMemoryErrorData data;
+};
+
+static LIST_HEAD(merr_free_list);
+static LIST_HEAD(mem_error_list);
+/*
+ * This lock protects updates to the merr_free_list and mem_error_list
+ * lists.
+ */
+static struct lock mem_err_lock = LOCK_UNLOCKED;
+
+void mem_err_info_dump(struct opal_errorlog *buf, void *data, uint16_t size);
+
+DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_RES, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, mem_err_info_dump);
+
+DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_DEALLOC, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, mem_err_info_dump);
+
+void mem_err_info_dump(struct opal_errorlog *buf, void *data, uint16_t size)
+{
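+	/* 0x44455350 is ASCII "DESP", presumably tagging the appended dump */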
+ opal_elog_update_user_dump(buf, data, 0x44455350, size);
+}
+
+static bool send_response_to_fsp(u32 cmd_sub_mod)
+{
+ struct fsp_msg *rsp;
+ int rc = -ENOMEM;
+
+ rsp = fsp_mkmsg(cmd_sub_mod, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+ if (rc) {
+ /* XXX Generate error logs */
+ prerror(PREFIX "Error %d queueing FSP memory error"
+ " reply\n", rc);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Queue up the memory error message for delivery.
+ *
+ * queue_event_for_delivery gets called from two places:
+ * 1) from queue_mem_err_node when a new FSP memory error is available and
+ * 2) from the completion callback indicating that Linux has consumed a message.
+ *
+ * TODO:
+ * There is a chance that we may not get a free slot to queue our event
+ * for delivery to Linux during either of the above invocations. In that case
+ * we end up holding events until the next FSP memory error comes in. We need
+ * to address this case either here OR fix up the messaging infrastructure to
+ * make sure at least one slot will always be available per message type.
+ *
+ * XXX: BenH: I changed the msg infrastructure to attempt an allocation
+ * in that case, at least until we clarify a bit better how
+ * we want to handle things.
+ */
+static void queue_event_for_delivery(void *data __unused)
+{
+ struct fsp_mem_err_node *entry;
+ uint64_t *merr_data;
+ int rc;
+
+ lock(&mem_err_lock);
+ entry = list_pop(&mem_error_list, struct fsp_mem_err_node, list);
+ unlock(&mem_err_lock);
+
+ if (!entry)
+ return;
+
+ /*
+	 * struct OpalMemoryErrorData is a well-packed structure of
+	 * (4 * 64 bits) size. Hence we use a uint64_t pointer to pass the
+	 * entire structure using 4 params in the generic message format.
+ */
+ merr_data = (uint64_t *)&entry->data;
+
+ /* queue up for delivery */
+ rc = opal_queue_msg(OPAL_MSG_MEM_ERR, NULL,
+ queue_event_for_delivery,
+ merr_data[0], merr_data[1],
+ merr_data[2], merr_data[3]);
+ lock(&mem_err_lock);
+ if (rc) {
+ /*
+ * Failed to queue up the event for delivery. No free slot
+		 * available. There is a chance that we are trying to queue
+		 * up multiple events at the same time. We may already have
+		 * at least one event queued up; in that case we will be
+		 * called again through the completion callback and we should
+		 * be able to grab an empty slot then.
+ *
+ * For now, put this node back on mem_error_list.
+ */
+ list_add(&mem_error_list, &entry->list);
+ } else
+ list_add(&merr_free_list, &entry->list);
+ unlock(&mem_err_lock);
+}
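+
+/*
+ * Node lifecycle sketch: entries start on merr_free_list,
+ * queue_mem_err_node() moves one to mem_error_list, and
+ * queue_event_for_delivery() pops it and returns it to merr_free_list once
+ * opal_queue_msg() accepts the event (on failure the node goes back on
+ * mem_error_list, to be retried from the completion callback).
+ */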
+
+static int queue_mem_err_node(struct OpalMemoryErrorData *merr_evt)
+{
+ struct fsp_mem_err_node *entry;
+
+ lock(&mem_err_lock);
+ entry = list_pop(&merr_free_list, struct fsp_mem_err_node, list);
+ if (!entry) {
+ printf(PREFIX "Failed to queue up memory error event.\n");
+ unlock(&mem_err_lock);
+ return -ENOMEM;
+ }
+
+ entry->data = *merr_evt;
+ list_add(&mem_error_list, &entry->list);
+ unlock(&mem_err_lock);
+
+ /* Queue up the event for delivery to OS. */
+ queue_event_for_delivery(NULL);
+ return 0;
+}
+
+/* Check if a memory resilience event for the same address already exists. */
+static bool is_resilience_event_exist(u64 paddr)
+{
+ struct fsp_mem_err_node *entry;
+ struct OpalMemoryErrorData *merr_evt;
+ int found = 0;
+
+ lock(&mem_err_lock);
+ list_for_each(&mem_error_list, entry, list) {
+ merr_evt = &entry->data;
+ if ((merr_evt->type == OPAL_MEM_ERR_TYPE_RESILIENCE) &&
+ (merr_evt->u.resilience.physical_address_start
+ == paddr)) {
+ found = 1;
+ break;
+ }
+ }
+ unlock(&mem_err_lock);
+ return !!found;
+}
+
+/*
+ * Handle the Memory Resilience error message.
+ * Section 28.2 of Hypervisor to FSP Mailbox Interface Specification.
+ *
+ * The flow for a Memory Resilience Event is:
+ * 1. The PRD component in the FSP gets a recoverable attention from hardware
+ *    when there is a correctable/uncorrectable memory error and a page needs
+ *    to be freed up.
+ * 2. PRD sends the Memory Resilience Command to the hypervisor with the real
+ *    address of the 4K memory page in which the error occurred.
+ * 3. The hypervisor acknowledges with a status immediately. Immediate
+ *    acknowledgment doesn't require the freeing of the page to be completed.
+ */
+static bool handle_memory_resilience(u32 cmd_sub_mod, u64 paddr)
+{
+ int rc = 0;
+ u8 err = 0;
+ struct OpalMemoryErrorData mem_err_evt;
+
+ memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData));
+ /* Check arguments */
+ if (paddr == 0) {
+ prerror(PREFIX "memory resilience: Invalid real address.\n");
+ err = FSP_RESP_STATUS_GENERIC_FAILURE;
+ }
+
+ /* If we had an error, send response to fsp and return */
+ if (err)
+ return send_response_to_fsp(FSP_RSP_MEM_RES | err);
+
+ /* Check if event already exist for same address. */
+ if (is_resilience_event_exist(paddr))
+ goto send_response;
+
+ /* Populate an event. */
+ mem_err_evt.version = OpalMemErr_V1;
+ mem_err_evt.type = OPAL_MEM_ERR_TYPE_RESILIENCE;
+
+ switch (cmd_sub_mod) {
+ case FSP_CMD_MEM_RES_CE:
+ /*
+ * Should we keep counter for corrected errors in
+ * sapphire OR let linux (PowerNV) handle it?
+ *
+ * For now, send corrected errors to linux and let
+ * linux handle corrected errors thresholding.
+ */
+ mem_err_evt.flags |= OPAL_MEM_CORRECTED_ERROR;
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_CE;
+ break;
+ case FSP_CMD_MEM_RES_UE:
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_UE;
+ break;
+ case FSP_CMD_MEM_RES_UE_SCRB:
+ mem_err_evt.u.resilience.resil_err_type =
+ OPAL_MEM_RESILIENCE_UE_SCRUB;
+ break;
+ }
+ mem_err_evt.u.resilience.physical_address_start = paddr;
+ mem_err_evt.u.resilience.physical_address_end =
+ paddr + MEM_ERR_PAGE_SIZE_4K;
+
+ /* Queue up the event and inform OS about it. */
+ rc = queue_mem_err_node(&mem_err_evt);
+
+send_response:
+ /* Queue up an OK response to the resilience message itself */
+ if (!rc)
+ return send_response_to_fsp(FSP_RSP_MEM_RES);
+ else {
+ log_error(&e_info(OPAL_RC_MEM_ERR_RES),
+ &mem_err_evt, sizeof(struct OpalMemoryErrorData),
+ "OPAL_MEM_ERR: Cannot queue up memory "
+ "resilience error event to the OS");
+ return false;
+ }
+}
+
+/* Update the existing event entry if a match is found. */
+static bool update_memory_deallocation_event(u64 paddr_start, u64 paddr_end)
+{
+ struct fsp_mem_err_node *entry;
+ struct OpalMemoryErrorData *merr_evt;
+ int found = 0;
+
+ lock(&mem_err_lock);
+ list_for_each(&mem_error_list, entry, list) {
+ merr_evt = &entry->data;
+ if ((merr_evt->type == OPAL_MEM_ERR_TYPE_DYN_DALLOC) &&
+ (merr_evt->u.dyn_dealloc.physical_address_start
+ == paddr_start)) {
+ found = 1;
+ if (merr_evt->u.dyn_dealloc.physical_address_end
+ < paddr_end)
+ merr_evt->u.dyn_dealloc.physical_address_end
+ = paddr_end;
+ break;
+ }
+ }
+ unlock(&mem_err_lock);
+ return !!found;
+}
+
+/*
+ * Handle dynamic memory deallocation message.
+ *
+ * When a condition occurs in which we need to do a large scale memory
+ * deallocation, PRD will send a starting and ending address of an area of
+ * memory to the hypervisor. The hypervisor then needs to use this to
+ * deallocate all pages between and including the addresses.
+ */
+static bool handle_memory_deallocation(u64 paddr_start, u64 paddr_end)
+{
+ int rc = 0;
+ u8 err = 0;
+ struct OpalMemoryErrorData mem_err_evt;
+
+ memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData));
+ /* Check arguments */
+ if ((paddr_start == 0) || (paddr_end == 0)) {
+ prerror(PREFIX "memory deallocation: Invalid "
+ "starting/ending real address.\n");
+ err = FSP_RESP_STATUS_GENERIC_FAILURE;
+ }
+
+ /* If we had an error, send response to fsp and return */
+ if (err)
+ return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC | err);
+
+ /*
+ * FSP can send dynamic memory deallocation multiple times for the
+	 * same address/address ranges. Hence check and update if we
+	 * already have the same event queued.
+ */
+ if (update_memory_deallocation_event(paddr_start, paddr_end))
+ goto send_response;
+
+	/* Populate a new event. */
+ mem_err_evt.version = OpalMemErr_V1;
+ mem_err_evt.type = OPAL_MEM_ERR_TYPE_DYN_DALLOC;
+ mem_err_evt.u.dyn_dealloc.dyn_err_type =
+ OPAL_MEM_DYNAMIC_DEALLOC;
+ mem_err_evt.u.dyn_dealloc.physical_address_start = paddr_start;
+ mem_err_evt.u.dyn_dealloc.physical_address_end = paddr_end;
+
+ /* Queue up the event and inform OS about it. */
+ rc = queue_mem_err_node(&mem_err_evt);
+
+send_response:
+ /* Queue up an OK response to the memory deallocation message itself */
+ if (!rc)
+ return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC);
+ else {
+ log_error(&e_info(OPAL_RC_MEM_ERR_DEALLOC),
+ &mem_err_evt, sizeof(struct OpalMemoryErrorData),
+ "OPAL_MEM_ERR: Cannot queue up memory "
+ "deallocation error event to the OS");
+ return false;
+ }
+}
+
+/* Receive a memory error message and handle it. */
+static bool fsp_mem_err_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u64 paddr_start, paddr_end;
+
+ printf(PREFIX "Received 0x%08ux command\n", cmd_sub_mod);
+ switch (cmd_sub_mod) {
+ case FSP_CMD_MEM_RES_CE:
+ case FSP_CMD_MEM_RES_UE:
+ case FSP_CMD_MEM_RES_UE_SCRB:
+ /*
+		 * We get the memory resilience command from the FSP for
+		 * correctable/uncorrectable/scrub UE errors with the real
+		 * address of the 4K memory page in which the error occurred.
+ */
+ paddr_start = *((u64 *)&msg->data.words[0]);
+ printf(PREFIX "Got memory resilience error message for "
+ "paddr=0x%016llux\n", paddr_start);
+ return handle_memory_resilience(cmd_sub_mod, paddr_start);
+ case FSP_CMD_MEM_DYN_DEALLOC:
+ paddr_start = *((u64 *)&msg->data.words[0]);
+ paddr_end = *((u64 *)&msg->data.words[2]);
+ printf(PREFIX "Got dynamic memory deallocation message: "
+ "paddr_start=0x%016llux, paddr_end=0x%016llux\n",
+ paddr_start, paddr_end);
+ return handle_memory_deallocation(paddr_start, paddr_end);
+ }
+ return false;
+}
+
+/*
+ * Preallocate memory to hold up to MERR_MAX_RECORD memory error events
+ * until Linux consumes them.
+ */
+static int init_merr_free_list(uint32_t num_entries)
+{
+ struct fsp_mem_err_node *entry;
+ int i;
+
+ entry = zalloc(sizeof(struct fsp_mem_err_node) * num_entries);
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; ++i, entry++)
+ list_add_tail(&merr_free_list, &entry->list);
+
+ return 0;
+}
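+
+/*
+ * Note that all nodes come from a single zalloc() block; they are never
+ * freed, only recycled between merr_free_list and mem_error_list.
+ */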
+
+static struct fsp_client fsp_mem_err_client = {
+ .message = fsp_mem_err_msg,
+};
+
+void fsp_memory_err_init(void)
+{
+ int rc;
+
+ printf(PREFIX "Intializing fsp memory handling.\n");
+ /* If we have an FSP, register for notifications */
+ if (!fsp_present())
+ return;
+
+	/* Preallocate memory for MERR_MAX_RECORD records */
+ rc = init_merr_free_list(MERR_MAX_RECORD);
+ if (rc < 0)
+ return;
+
+ fsp_register_client(&fsp_mem_err_client, FSP_MCLASS_MEMORY_ERR);
+}
diff --git a/hw/fsp/fsp-nvram.c b/hw/fsp/fsp-nvram.c
new file mode 100644
index 0000000..b432c37
--- /dev/null
+++ b/hw/fsp/fsp-nvram.c
@@ -0,0 +1,414 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <opal.h>
+#include <lock.h>
+#include <device.h>
+#include <fsp-elog.h>
+
+//#define DBG(fmt...) printf("RTC: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+/*
+ * The FSP NVRAM API operates in "blocks" of 4K. It is entirely exposed
+ * to the OS via the OPAL APIs.
+ *
+ * In order to avoid dealing with complicated read/modify/write state
+ * machines (and added issues related to FSP failover in the middle)
+ * we keep a memory copy of the entire nvram which we load at boot
+ * time. We save only modified blocks.
+ *
+ * To limit the amount of memory used by the nvram image, we limit
+ * how much nvram we support to NVRAM_SIZE. Additionally, this limit
+ * of 1M is the maximum that the CHRP/PAPR nvram partition format
+ * supports for a partition entry.
+ *
+ * (Q: should we save the whole thing in case of FSP failover ?)
+ *
+ * The nvram is expected to comply with the CHRP/PAPR defined format,
+ * and specifically contain a System partition (ID 0x70) named "common"
+ * with configuration variables for the bootloader and a FW private
+ * partition for future use by skiboot.
+ *
+ * If the partition layout appears broken or lacks one of the above
+ * partitions, we reformat the entire nvram at boot time.
+ *
+ * We do not exploit the ability of the FSP to store a checksum. This
+ * is documented as possibly going away. The CHRP format for nvram
+ * that Linux uses has its own (though weak) checksum mechanism already
+ *
+ */
+
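+/*
+ * Dirty-range example (illustrative offsets): two writes covering
+ * [0x1200, 0x12ff] and [0x3400, 0x347f] leave fsp_nvram_dirty_start at
+ * 0x1000 and fsp_nvram_dirty_end at 0x3000 after block alignment, so
+ * fsp_nvram_send_write() sends a single triplet covering blocks 1..3.
+ */
+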
+#define NVRAM_BLKSIZE 0x1000
+
+struct nvram_triplet {
+ uint64_t dma_addr;
+ uint32_t blk_offset;
+ uint32_t blk_count;
+} __packed;
+
+#define NVRAM_FLAG_CLEAR_WPEND 0x80000000
+
+enum nvram_state {
+ NVRAM_STATE_CLOSED,
+ NVRAM_STATE_OPENING,
+ NVRAM_STATE_BROKEN,
+ NVRAM_STATE_OPEN,
+ NVRAM_STATE_ABSENT,
+};
+
+static void *fsp_nvram_image;
+static uint32_t fsp_nvram_size;
+static struct lock fsp_nvram_lock = LOCK_UNLOCKED;
+static struct fsp_msg *fsp_nvram_msg;
+static uint32_t fsp_nvram_dirty_start;
+static uint32_t fsp_nvram_dirty_end;
+static bool fsp_nvram_was_read;
+static struct nvram_triplet fsp_nvram_triplet __align(0x1000);
+static enum nvram_state fsp_nvram_state = NVRAM_STATE_CLOSED;
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_OPEN, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_SIZE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_READ, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static void fsp_nvram_send_write(void);
+
+static void fsp_nvram_wr_complete(struct fsp_msg *msg)
+{
+ struct fsp_msg *resp = msg->resp;
+ uint8_t rc;
+
+ lock(&fsp_nvram_lock);
+ fsp_nvram_msg = NULL;
+
+ /* Check for various errors. If an error occurred,
+ * we generally assume the nvram is completely dirty
+ * but we won't trigger a new write until we get
+ * either a new attempt at writing, or an FSP reset
+ * reload (TODO)
+ */
+ if (!resp || resp->state != fsp_msg_response)
+ goto fail_dirty;
+ rc = (msg->word1 >> 8) & 0xff;
+ switch(rc) {
+ case 0:
+ case 0x44:
+ /* Sync to secondary required... XXX */
+ case 0x45:
+ break;
+ case 0xef:
+ /* Sync to secondary failed, let's ignore that for now,
+ * maybe when (if) we handle redundant FSPs ...
+ */
+ prerror("FSP: NVRAM sync to secondary failed\n");
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
+ "FSP: NVRAM write return error 0x%02x\n", rc);
+ goto fail_dirty;
+ }
+ fsp_freemsg(msg);
+ if (fsp_nvram_dirty_start <= fsp_nvram_dirty_end)
+ fsp_nvram_send_write();
+ unlock(&fsp_nvram_lock);
+ return;
+ fail_dirty:
+ fsp_nvram_dirty_start = 0;
+ fsp_nvram_dirty_end = fsp_nvram_size - 1;
+ fsp_freemsg(msg);
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_write(void)
+{
+ uint32_t start = fsp_nvram_dirty_start;
+ uint32_t end = fsp_nvram_dirty_end;
+ uint32_t count;
+
+ if (start > end || fsp_nvram_state != NVRAM_STATE_OPEN)
+ return;
+ count = (end - start) / NVRAM_BLKSIZE + 1;
+ fsp_nvram_triplet.dma_addr = PSI_DMA_NVRAM_BODY + start;
+ fsp_nvram_triplet.blk_offset = start / NVRAM_BLKSIZE;
+ fsp_nvram_triplet.blk_count = count;
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_WRITE_VNVRAM, 6,
+ 0, PSI_DMA_NVRAM_TRIPL, 1,
+ NVRAM_FLAG_CLEAR_WPEND, 0, 0);
+ if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_wr_complete)) {
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE),
+ "FSP: Error queueing nvram update\n");
+ return;
+ }
+ fsp_nvram_dirty_start = fsp_nvram_size;
+ fsp_nvram_dirty_end = 0;
+}
+
+static void fsp_nvram_rd_complete(struct fsp_msg *msg)
+{
+ int64_t rc;
+
+ lock(&fsp_nvram_lock);
+
+ /* Read complete, check status. What to do if the read fails ?
+ *
+ * Well, there could be various reasons such as an FSP reboot
+ * at the wrong time, but there is really not much we can do
+ * so for now I'll just mark the nvram as closed, and we'll
+ * attempt a re-open and re-read whenever the OS tries to
+ * access it
+ */
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_nvram_msg = NULL;
+ fsp_freemsg(msg);
+ if (rc) {
+ prerror("FSP: NVRAM read failed, will try again later\n");
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ } else {
+ /* nvram was read once, no need to do it ever again */
+ fsp_nvram_was_read = true;
+ fsp_nvram_state = NVRAM_STATE_OPEN;
+
+ /* XXX Here we should look for nvram settings that concern
+ * us such as guest kernel arguments etc...
+ */
+ }
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_read(void)
+{
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_READ_VNVRAM, 4,
+ 0, PSI_DMA_NVRAM_BODY, 0,
+ fsp_nvram_size / NVRAM_BLKSIZE);
+ if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_rd_complete)) {
+ /* If the nvram read fails to queue, we mark ourselves
+ * closed. Shouldn't have happened anyway. Not much else
+ * we can do.
+ */
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ log_simple_error(&e_info(OPAL_RC_NVRAM_READ),
+ "FSP: Error queueing nvram read\n");
+ return;
+ }
+}
+
+static void fsp_nvram_open_complete(struct fsp_msg *msg)
+{
+ int8_t rc;
+
+ lock(&fsp_nvram_lock);
+
+ /* Open complete, check status */
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ fsp_nvram_msg = NULL;
+ fsp_freemsg(msg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_OPEN),
+ "FSP: NVRAM open failed, FSP error 0x%02x\n", rc);
+ goto failed;
+ }
+ if (fsp_nvram_was_read)
+ fsp_nvram_state = NVRAM_STATE_OPEN;
+ else
+ fsp_nvram_send_read();
+ unlock(&fsp_nvram_lock);
+ return;
+ failed:
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ unlock(&fsp_nvram_lock);
+}
+
+static void fsp_nvram_send_open(void)
+{
+ printf("FSP NVRAM: Opening nvram...\n");
+ fsp_nvram_msg = fsp_mkmsg(FSP_CMD_OPEN_VNVRAM, 1, fsp_nvram_size);
+ assert(fsp_nvram_msg);
+ fsp_nvram_state = NVRAM_STATE_OPENING;
+ if (!fsp_queue_msg(fsp_nvram_msg, fsp_nvram_open_complete))
+ return;
+
+ prerror("FSP NVRAM: Failed to queue nvram open message\n");
+ fsp_freemsg(fsp_nvram_msg);
+ fsp_nvram_msg = NULL;
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+}
+
+static bool fsp_nvram_get_size(uint32_t *out_size)
+{
+ struct fsp_msg *msg;
+ int rc, size;
+
+ msg = fsp_mkmsg(FSP_CMD_GET_VNVRAM_SIZE, 0);
+ rc = fsp_sync_msg(msg, false);
+ size = msg->resp ? msg->resp->data.words[0] : 0;
+ fsp_freemsg(msg);
+ if (rc || size == 0) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_SIZE),
+ "FSP: Error %d nvram size reported is %d\n", rc, size);
+ fsp_nvram_state = NVRAM_STATE_BROKEN;
+ return false;
+ }
+ printf("FSP: NVRAM file size from FSP is %d bytes\n", size);
+ *out_size = size;
+ return true;
+}
+
+static bool fsp_nvram_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ printf("FSP: Closing NVRAM on account of FSP Reset\n");
+ fsp_nvram_state = NVRAM_STATE_CLOSED;
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ printf("FSP: Reopening NVRAM of FSP Reload complete\n");
+ lock(&fsp_nvram_lock);
+ fsp_nvram_send_open();
+ unlock(&fsp_nvram_lock);
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_nvram_client_rr = {
+ .message = fsp_nvram_msg_rr,
+};
+
+int fsp_nvram_info(uint32_t *total_size)
+{
+ if (!fsp_present()) {
+ fsp_nvram_state = NVRAM_STATE_ABSENT;
+ return OPAL_HARDWARE;
+ }
+
+ if (!fsp_nvram_get_size(total_size))
+ return OPAL_HARDWARE;
+ return OPAL_SUCCESS;
+}
+
+int fsp_nvram_start_read(void *dst, uint32_t src, uint32_t len)
+{
+ /* We are currently limited to fully aligned transfers */
+ assert((((uint64_t)dst) & 0xfff) == 0);
+ assert(dst);
+
+ /* Currently don't support src!=0 */
+ assert(src == 0);
+
+ if (!fsp_present())
+ return -ENODEV;
+
+ op_display(OP_LOG, OP_MOD_INIT, 0x0007);
+
+ lock(&fsp_nvram_lock);
+
+ /* Store image info */
+ fsp_nvram_image = dst;
+ fsp_nvram_size = len;
+
+ /* Mark nvram as not dirty */
+ fsp_nvram_dirty_start = len;
+ fsp_nvram_dirty_end = 0;
+
+ /* Map TCEs */
+ fsp_tce_map(PSI_DMA_NVRAM_TRIPL, &fsp_nvram_triplet,
+ PSI_DMA_NVRAM_TRIPL_SZ);
+ fsp_tce_map(PSI_DMA_NVRAM_BODY, dst, PSI_DMA_NVRAM_BODY_SZ);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_nvram_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Open and load the nvram from the FSP */
+ fsp_nvram_send_open();
+
+ unlock(&fsp_nvram_lock);
+
+ return 0;
+}
+
+int fsp_nvram_write(uint32_t offset, void *src, uint32_t size)
+{
+ uint64_t end = offset + size - 1;
+
+ /* We only support writing from the original image */
+ if (src != fsp_nvram_image + offset)
+ return OPAL_HARDWARE;
+
+ offset &= ~(NVRAM_BLKSIZE - 1);
+ end &= ~(NVRAM_BLKSIZE - 1);
+
+ lock(&fsp_nvram_lock);
+ /* If the nvram is closed, try re-opening */
+ if (fsp_nvram_state == NVRAM_STATE_CLOSED)
+ fsp_nvram_send_open();
+ if (fsp_nvram_dirty_start > offset)
+ fsp_nvram_dirty_start = offset;
+ if (fsp_nvram_dirty_end < end)
+ fsp_nvram_dirty_end = end;
+ if (!fsp_nvram_msg && fsp_nvram_state == NVRAM_STATE_OPEN)
+ fsp_nvram_send_write();
+ unlock(&fsp_nvram_lock);
+
+ return 0;
+}
+
+/* This is called right before starting the payload (Linux) to
+ * ensure the initial open & read of nvram has happened before
+ * we transfer control to the guest OS. This is necessary as
+ * Linux will not handle an OPAL_BUSY return properly and will
+ * treat it as an error.
+ */
+void fsp_nvram_wait_open(void)
+{
+ if (!fsp_present())
+ return;
+
+ while(fsp_nvram_state == NVRAM_STATE_OPENING)
+ fsp_poll();
+
+ if (!fsp_nvram_was_read) {
+ log_simple_error(&e_info(OPAL_RC_NVRAM_INIT),
+ "FSP: NVRAM not read, skipping init\n");
+ nvram_read_complete(false);
+ return;
+ }
+
+ nvram_read_complete(true);
+}
diff --git a/hw/fsp/fsp-op-panel.c b/hw/fsp/fsp-op-panel.c
new file mode 100644
index 0000000..e2df34e
--- /dev/null
+++ b/hw/fsp/fsp-op-panel.c
@@ -0,0 +1,249 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <opal.h>
+#include <device.h>
+#include <processor.h>
+#include <opal-msg.h>
+#include <fsp-elog.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_PANEL_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_OP_PANEL,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+static struct fsp_msg op_msg_resp;
+static struct fsp_msg op_msg = {
+ .resp = &op_msg_resp,
+};
+static struct fsp_msg *op_req;
+static uint64_t op_async_token;
+static struct lock op_lock = LOCK_UNLOCKED;
+
+void op_display(enum op_severity sev, enum op_module mod, uint16_t code)
+{
+ uint32_t w0 = sev << 16 | mod;
+ uint32_t w1;
+ bool clean_lock;
+
+ if (!fsp_present())
+ return;
+
+ w1 = tohex((code >> 12) & 0xf) << 24;
+ w1 |= tohex((code >> 8) & 0xf) << 16;
+ w1 |= tohex((code >> 4) & 0xf) << 8;
+ w1 |= tohex((code ) & 0xf);
+
+ /*
+ * We use lock_recursive to detect recursion. We avoid sending
+ * the message if that happens as this could be a case of a
+ * locking error in the FSP driver for example
+ */
+ clean_lock = lock_recursive(&op_lock);
+ if (!clean_lock)
+ return;
+
+ /* We don't use mkmsg, we use a preallocated msg to avoid
+ * going down the malloc path etc... since this can be called
+ * in case of fatal errors
+ */
+ fsp_fillmsg(&op_msg, FSP_CMD_DISP_SRC_DIRECT, 3, 1, w0, w1);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
+
+void op_panel_disable_src_echo(void)
+{
+ if (!fsp_present())
+ return;
+
+ lock(&op_lock);
+ fsp_fillmsg(&op_msg, FSP_CMD_DIS_SRC_ECHO, 0);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
+
+void op_panel_clear_src(void)
+{
+ if (!fsp_present())
+ return;
+
+ lock(&op_lock);
+ fsp_fillmsg(&op_msg, FSP_CMD_CLEAR_SRC, 0);
+ fsp_sync_msg(&op_msg, false);
+ unlock(&op_lock);
+}
+
+/* opal_write_oppanel - Write to the physical op panel.
+ *
+ * Pass in an array of oppanel_line_t structs defining the ASCII characters
+ * to display on each line of the oppanel. If there are two lines on the
+ * physical panel, and you only want to write to the first line, you only
+ * need to pass in one line. If you only want to write to the second line,
+ * you need to pass in both lines, and set the line_len of the first line
+ * to zero.
+ *
+ * This command is asynchronous. If OPAL_SUCCESS is returned, then the
+ * operation was initiated successfully. Subsequent calls will return
+ * OPAL_BUSY until the current operation is complete.
+ */
+struct op_src {
+ uint8_t version;
+#define OP_SRC_VERSION 2
+ uint8_t flags;
+ uint8_t reserved;
+ uint8_t hex_word_cnt;
+ uint16_t reserved2;
+ uint16_t total_size;
+ uint32_t word2; /* SRC format in low byte */
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ uint32_t word6;
+ uint32_t word7;
+ uint32_t word8;
+ uint32_t word9;
+#define OP_SRC_ASCII_LEN 32
+ uint8_t ascii[OP_SRC_ASCII_LEN]; /* Word 11 */
+} __packed __align(4);
+
+/* Page align for the sake of TCE mapping */
+static struct op_src op_src __align(0x1000);
+
+static void __op_panel_write_complete(struct fsp_msg *msg)
+{
+ fsp_tce_unmap(PSI_DMA_OP_PANEL_MISC, 0x1000);
+ lwsync();
+ op_req = NULL;
+ fsp_freemsg(msg);
+}
+
+static void op_panel_write_complete(struct fsp_msg *msg)
+{
+ uint8_t rc = (msg->resp->word1 >> 8) & 0xff;
+
+ if (rc)
+ prerror("OPPANEL: Error 0x%02x in display command\n", rc);
+
+ __op_panel_write_complete(msg);
+
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, 1, op_async_token);
+}
+
+static int64_t __opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines,
+ uint64_t async_token)
+{
+ int64_t rc = OPAL_ASYNC_COMPLETION;
+ int len;
+ int i;
+
+ if (num_lines < 1 || num_lines > 2)
+ return OPAL_PARAMETER;
+
+ lock(&op_lock);
+
+ /* Only one in flight */
+ if (op_req) {
+ rc = OPAL_BUSY_EVENT;
+ goto bail;
+ }
+
+ op_req = fsp_allocmsg(true);
+ if (!op_req) {
+ rc = OPAL_NO_MEM;
+ goto bail;
+ }
+
+ op_async_token = async_token;
+
+ memset(&op_src, 0, sizeof(op_src));
+
+ op_src.version = OP_SRC_VERSION;
+ op_src.flags = 0;
+ op_src.reserved = 0;
+ op_src.hex_word_cnt = 1; /* header word only */
+ op_src.reserved2 = 0;
+ op_src.total_size = sizeof(op_src);
+ op_src.word2 = 0; /* should be unneeded */
+
+ len = lines[0].line_len > 16 ? 16 : lines[0].line_len;
+
+ memset(op_src.ascii + len, ' ', 16-len);
+ memcpy(op_src.ascii, lines[0].line, len);
+ if (num_lines > 1) {
+ len = lines[1].line_len > 16 ? 16 : lines[1].line_len;
+ memcpy(op_src.ascii + 16, lines[1].line, len);
+ memset(op_src.ascii + 16 + len, ' ', 16-len);
+ }
+
+ for (i = 0; i < sizeof(op_src.ascii); i++) {
+ /*
+ * So, there's this interesting thing if you send
+ * HTML/Javascript through the Operator Panel.
+ * You get to inject it into the ASM web ui!
+ * So we filter out anything suspect here,
+ * at least for the time being.
+ *
+ * Allowed characters:
+ * . / 0-9 : a-z A-Z SPACE
+ */
+ if (! ((op_src.ascii[i] >= '.' && op_src.ascii[i] <= ':') ||
+ (op_src.ascii[i] >= 'a' && op_src.ascii[i] <= 'z') ||
+ (op_src.ascii[i] >= 'A' && op_src.ascii[i] <= 'Z') ||
+ op_src.ascii[i] == ' ')) {
+ op_src.ascii[i] = '.';
+ }
+ }
+
+ fsp_tce_map(PSI_DMA_OP_PANEL_MISC, &op_src, 0x1000);
+
+ fsp_fillmsg(op_req, FSP_CMD_DISP_SRC_INDIR, 3, 0,
+ PSI_DMA_OP_PANEL_MISC, sizeof(struct op_src));
+ rc = fsp_queue_msg(op_req, op_panel_write_complete);
+ if (rc) {
+ __op_panel_write_complete(op_req);
+ rc = OPAL_INTERNAL_ERROR;
+ }
+ bail:
+ unlock(&op_lock);
+ log_simple_error(&e_info(OPAL_RC_PANEL_WRITE),
+ "FSP: Error updating Op Panel: %lld\n", rc);
+ return rc;
+}
+
+static int64_t opal_write_oppanel_async(uint64_t async_token,
+ oppanel_line_t *lines,
+ uint64_t num_lines)
+{
+ return __opal_write_oppanel(lines, num_lines, async_token);
+}
+
+void fsp_oppanel_init(void)
+{
+ struct dt_node *oppanel;
+
+ if (!fsp_present())
+ return;
+
+ opal_register(OPAL_WRITE_OPPANEL_ASYNC, opal_write_oppanel_async, 3);
+
+ oppanel = dt_new(opal_node, "oppanel");
+ dt_add_property_cells(oppanel, "#length", 16);
+ dt_add_property_cells(oppanel, "#lines", 2);
+ dt_add_property_string(oppanel, "compatible", "ibm,opal-oppanel");
+}
diff --git a/hw/fsp/fsp-rtc.c b/hw/fsp/fsp-rtc.c
new file mode 100644
index 0000000..887091a
--- /dev/null
+++ b/hw/fsp/fsp-rtc.c
@@ -0,0 +1,572 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <timebase.h>
+#include <time.h>
+#include <fsp-elog.h>
+
+//#define DBG(fmt...) printf("RTC: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+/*
+ * Note on how these calls operate:
+ *
+ * Because the RTC calls can be pretty slow, these functions will shoot
+ * an asynchronous request to the FSP (if none is already pending)
+ *
+ * The requests will return OPAL_BUSY_EVENT as long as the event has
+ * not been completed.
+ *
+ * WARNING: An attempt at doing an RTC write while one is already pending
+ * will simply ignore the new arguments and continue returning
+ * OPAL_BUSY_EVENT. This is to be compatible with existing Linux code.
+ *
+ * Completion of the request will result in an event OPAL_EVENT_RTC
+ * being signaled, which will remain raised until a corresponding call
+ * to opal_rtc_read() or opal_rtc_write() finally returns OPAL_SUCCESS,
+ * at which point the operation is complete and the event cleared.
+ *
+ * If we end up taking longer than rtc_read_timeout_ms milliseconds waiting
+ * for the response from a read request, we simply return a cached value (plus
+ * an offset calculated from the timebase). When the read request finally
+ * returns, we update our cached value accordingly.
+ *
+ * There are two separate sets of state for reads and writes. If both are
+ * attempted at the same time, the event bit will remain set as long as either
+ * of the two has a pending event to signal.
+ */
+
+enum {
+ RTC_TOD_VALID,
+ RTC_TOD_INVALID,
+ RTC_TOD_PERMANENT_ERROR,
+} rtc_tod_state = RTC_TOD_INVALID;
+
+static struct lock rtc_lock;
+static struct fsp_msg *rtc_read_msg;
+static struct fsp_msg *rtc_write_msg;
+/* TODO We'd probably want to export and use this variable declared in fsp.c,
+ * instead of each component individually maintaining the state.. may be for
+ * later optimization
+ */
+static bool fsp_in_reset = false;
+
+/* Last synchronisation point */
+static struct {
+ struct tm tm;
+ unsigned long tb;
+ bool dirty;
+} rtc_tod_cache;
+
+/* Timebase value when we last initiated a RTC read request */
+static unsigned long read_req_tb;
+
+/* If a RTC read takes longer than this, we return a value generated
+ * from the cache + timebase */
+static const int rtc_read_timeout_ms = 1500;
+
+DEFINE_LOG_ENTRY(OPAL_RC_RTC_TOD, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_RTC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+static int days_in_month(int month, int year)
+{
+ static int month_days[] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
+ };
+
+ assert(1 <= month && month <= 12);
+
+ /* we may need to update this in the year 4000, pending a
+ * decision on whether or not it's a leap year */
+ if (month == 2) {
+ bool is_leap = !(year % 400) || ((year % 100) && !(year % 4));
+ return is_leap ? 29 : 28;
+ }
+
+ return month_days[month - 1];
+}
+
+static void tm_add(struct tm *in, struct tm *out, unsigned long secs)
+{
+ unsigned long year, month, mday, hour, minute, second, d;
+ static const unsigned long sec_in_400_years =
+		((303ul * 365) + (97 * 366)) * 24 * 60 * 60;
+
+ assert(in);
+ assert(out);
+
+ second = in->tm_sec;
+ minute = in->tm_min;
+ hour = in->tm_hour;
+ mday = in->tm_mday;
+ month = in->tm_mon;
+ year = in->tm_year;
+
+ second += secs;
+
+ /* There are the same number of seconds in any 400-year block; this
+ * limits the iterations in the loop below */
+ year += 400 * (second / sec_in_400_years);
+ second = second % sec_in_400_years;
+
+ if (second >= 60) {
+ minute += second / 60;
+ second = second % 60;
+ }
+
+ if (minute >= 60) {
+ hour += minute / 60;
+ minute = minute % 60;
+ }
+
+ if (hour >= 24) {
+ mday += hour / 24;
+ hour = hour % 24;
+ }
+
+ for (d = days_in_month(month, year); mday >= d;
+ d = days_in_month(month, year)) {
+ month++;
+ if (month > 12) {
+ month = 1;
+ year++;
+ }
+ mday -= d;
+ }
+
+ out->tm_year = year;
+ out->tm_mon = month;
+ out->tm_mday = mday;
+ out->tm_hour = hour;
+ out->tm_min = minute;
+ out->tm_sec = second;
+}
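+
+/*
+ * Worked example: 2014-03-21 23:30:00 plus 3600 seconds gives
+ * 2014-03-22 00:30:00; the extra hour carries through minutes and hours
+ * into the day, and days_in_month() bounds the month carry.
+ */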
+
+/* MSB is byte 3, LSB is byte 0 */
+static unsigned int bcd_byte(uint32_t bcd, int byteno)
+{
+ bcd >>= byteno * 8;
+ return (bcd >> 4 & 0xf) * 10 + (bcd & 0xf);
+}
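+
+/*
+ * e.g. for the hypothetical BCD word 0x20140321:
+ *   bcd_byte(x, 3) == 20, bcd_byte(x, 2) == 14,
+ *   bcd_byte(x, 1) == 3,  bcd_byte(x, 0) == 21   (2014-03-21)
+ */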
+
+static uint32_t int_to_bcd2(unsigned int x)
+{
+ return (((x / 10) << 4) & 0xf0) | (x % 10);
+}
+
+static uint32_t int_to_bcd4(unsigned int x)
+{
+ return int_to_bcd2(x / 100) << 8 | int_to_bcd2(x % 100);
+}
+
+static void rtc_to_tm(struct fsp_msg *msg, struct tm *tm)
+{
+ uint32_t x;
+
+ /* The FSP returns in BCD:
+ *
+ * | year | month | mday |
+ * +------------------------------------+
+ * | hour | minute | secs | reserved |
+ * +------------------------------------+
+ * | microseconds |
+ */
+ x = msg->data.words[0];
+ tm->tm_year = bcd_byte(x, 3) * 100 + bcd_byte(x, 2);
+ tm->tm_mon = bcd_byte(x, 1);
+ tm->tm_mday = bcd_byte(x, 0);
+
+ x = msg->data.words[1];
+ tm->tm_hour = bcd_byte(x, 3);
+ tm->tm_min = bcd_byte(x, 2);
+ tm->tm_sec = bcd_byte(x, 1);
+}
+
+static void tm_to_datetime(struct tm *tm, uint32_t *y_m_d, uint64_t *h_m_s_m)
+{
+ uint64_t h_m_s;
+ /*
+	 * The OPAL API is defined as returning a u64 of a similar
+ * format to the FSP message; the 32-bit date field is
+ * in the format:
+ *
+ * | year | year | month | day |
+ *
+ */
+ *y_m_d = int_to_bcd4(tm->tm_year) << 16 |
+ int_to_bcd2(tm->tm_mon) << 8 |
+ int_to_bcd2(tm->tm_mday);
+
+ /*
+ * ... and the 64-bit time field is in the format
+ *
+ * | hour | minutes | secs | millisec |
+ * | -------------------------------------
+ * | millisec | reserved |
+ *
+ * We simply ignore the microseconds/milliseconds for now
+ * as I don't quite understand why the OPAL API defines that
+ * it needs 6 digits for the milliseconds :-) I suspect the
+ * doc got that wrong and it's supposed to be micro but
+ * let's ignore it.
+ *
+ * Note that Linux doesn't use nor set the ms field anyway.
+ */
+ h_m_s = int_to_bcd2(tm->tm_hour) << 24 |
+ int_to_bcd2(tm->tm_min) << 16 |
+ int_to_bcd2(tm->tm_sec) << 8;
+
+ *h_m_s_m = h_m_s << 32;
+}
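+
+/*
+ * e.g. a struct tm of 2014-03-21 12:34:56 encodes (illustratively) as
+ * y_m_d = 0x20140321 and h_m_s_m = 0x1234560000000000.
+ */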
+
+static void fsp_rtc_process_read(struct fsp_msg *read_resp)
+{
+ int val = (read_resp->word1 >> 8) & 0xff;
+
+ switch (val) {
+ case 0xa9:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD in invalid state\n");
+ rtc_tod_state = RTC_TOD_INVALID;
+ break;
+
+ case 0xaf:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD in permanent error state\n");
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+ break;
+
+ case 0:
+ /* Save the read RTC value in our cache */
+ rtc_to_tm(read_resp, &rtc_tod_cache.tm);
+ rtc_tod_cache.tb = mftb();
+ rtc_tod_state = RTC_TOD_VALID;
+ break;
+
+ default:
+ log_simple_error(&e_info(OPAL_RC_RTC_TOD),
+ "RTC TOD read failed: %d\n", val);
+ rtc_tod_state = RTC_TOD_INVALID;
+ }
+}
+
+static void opal_rtc_eval_events(void)
+{
+ bool pending = false;
+
+ if (rtc_read_msg && !fsp_msg_busy(rtc_read_msg))
+ pending = true;
+ if (rtc_write_msg && !fsp_msg_busy(rtc_write_msg))
+ pending = true;
+ opal_update_pending_evt(OPAL_EVENT_RTC, pending ? OPAL_EVENT_RTC : 0);
+}
+
+static void fsp_rtc_req_complete(struct fsp_msg *msg)
+{
+ lock(&rtc_lock);
+ DBG("RTC completion %p\n", msg);
+ if (msg == rtc_read_msg)
+ fsp_rtc_process_read(msg->resp);
+ opal_rtc_eval_events();
+ unlock(&rtc_lock);
+}
+
+static int64_t fsp_rtc_send_read_request(void)
+{
+ struct fsp_msg *msg;
+ int rc;
+
+ msg = fsp_mkmsg(FSP_CMD_READ_TOD, 0);
+ if (!msg) {
+ log_simple_error(&e_info(OPAL_RC_RTC_READ),
+ "RTC: failed to allocate read message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ rc = fsp_queue_msg(msg, fsp_rtc_req_complete);
+ if (rc) {
+ fsp_freemsg(msg);
+ log_simple_error(&e_info(OPAL_RC_RTC_READ),
+ "RTC: failed to queue read message: %d\n", rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ read_req_tb = mftb();
+ rtc_read_msg = msg;
+
+ return OPAL_BUSY_EVENT;
+}
+
+static void encode_cached_tod(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+ unsigned long cache_age_sec;
+ struct tm tm;
+
+ cache_age_sec = tb_to_msecs(mftb() - rtc_tod_cache.tb) / 1000;
+
+ tm_add(&rtc_tod_cache.tm, &tm, cache_age_sec);
+
+ /* Format to OPAL API values */
+ tm_to_datetime(&tm, year_month_day, hour_minute_second_millisecond);
+}
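+
+/*
+ * For instance, if the cache holds 2014-03-21 23:30:00 and 90 seconds of
+ * timebase have elapsed since it was filled, the encoded result is
+ * 2014-03-21 23:31:30.
+ */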
+
+int fsp_rtc_get_cached_tod(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+
+ if (rtc_tod_state != RTC_TOD_VALID)
+ return -1;
+
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ return 0;
+}
+
+static int64_t fsp_opal_rtc_read(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond)
+{
+ struct fsp_msg *msg;
+ int64_t rc;
+
+ if (!year_month_day || !hour_minute_second_millisecond)
+ return OPAL_PARAMETER;
+
+ lock(&rtc_lock);
+ /* During R/R of FSP, read cached TOD */
+ if (fsp_in_reset) {
+ fsp_rtc_get_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+ goto out;
+ }
+
+ msg = rtc_read_msg;
+
+ if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
+ if (msg && !fsp_msg_busy(msg))
+ fsp_freemsg(msg);
+ rc = OPAL_HARDWARE;
+ goto out;
+ }
+
+ /* If we don't have a read pending already, fire off a request and
+ * return */
+ if (!msg) {
+ DBG("Sending new RTC read request\n");
+ rc = fsp_rtc_send_read_request();
+
+ /* If our pending read is done, clear events and return the time
+ * from the cache */
+ } else if (!fsp_msg_busy(msg)) {
+ DBG("RTC read complete, state %d\n", rtc_tod_state);
+
+ rtc_read_msg = NULL;
+ opal_rtc_eval_events();
+ fsp_freemsg(msg);
+
+ if (rtc_tod_state == RTC_TOD_VALID) {
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+ } else
+ rc = OPAL_INTERNAL_ERROR;
+
+ /* Timeout: return our cached value (updated from tb), but leave the
+ * read request pending so it will update the cache later */
+ } else if (mftb() > read_req_tb + msecs_to_tb(rtc_read_timeout_ms)) {
+ DBG("RTC read timed out\n");
+
+ encode_cached_tod(year_month_day,
+ hour_minute_second_millisecond);
+ rc = OPAL_SUCCESS;
+
+ /* Otherwise, we're still waiting on the read to complete */
+ } else {
+ rc = OPAL_BUSY_EVENT;
+ }
+out:
+ unlock(&rtc_lock);
+ return rc;
+}
+
+static int64_t fsp_opal_rtc_write(uint32_t year_month_day,
+ uint64_t hour_minute_second_millisecond)
+{
+ struct fsp_msg *msg;
+ uint32_t w0, w1, w2;
+ int64_t rc;
+
+ lock(&rtc_lock);
+ if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
+ rc = OPAL_HARDWARE;
+ msg = NULL;
+ goto bail;
+ }
+
+ /* Do we have a request already ? */
+ msg = rtc_write_msg;
+ if (msg) {
+ /* If it's still in progress, return */
+ if (fsp_msg_busy(msg)) {
+ /* Don't free the message */
+ msg = NULL;
+ rc = OPAL_BUSY_EVENT;
+ goto bail;
+ }
+
+ DBG("Completed write request @%p, state=%d\n", msg, msg->state);
+ /* It's complete, clear events */
+ rtc_write_msg = NULL;
+ opal_rtc_eval_events();
+
+ /* Check error state */
+ if (msg->state != fsp_msg_done) {
+ DBG(" -> request not in done state -> error !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ goto bail;
+ }
+ rc = OPAL_SUCCESS;
+ goto bail;
+ }
+
+ DBG("Sending new write request...\n");
+
+ /* Create a request and send it. Just like for read, we ignore
+ * the "millisecond" field which is probably supposed to be
+ * microseconds and which Linux ignores as well anyway
+ */
+ w0 = year_month_day;
+ w1 = (hour_minute_second_millisecond >> 32) & 0xffffff00;
+ w2 = 0;
+
+ rtc_write_msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, w0, w1, w2);
+ if (!rtc_write_msg) {
+ DBG(" -> allocation failed !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ goto bail;
+ }
+ DBG(" -> req at %p\n", rtc_write_msg);
+
+ if (fsp_in_reset) {
+ rtc_to_tm(rtc_write_msg, &rtc_tod_cache.tm);
+ rtc_tod_cache.tb = mftb();
+ rtc_tod_cache.dirty = true;
+ fsp_freemsg(rtc_write_msg);
+ rtc_write_msg = NULL;
+ rc = OPAL_SUCCESS;
+ goto bail;
+ } else if (fsp_queue_msg(rtc_write_msg, fsp_rtc_req_complete)) {
+ DBG(" -> queueing failed !\n");
+ rc = OPAL_INTERNAL_ERROR;
+ fsp_freemsg(rtc_write_msg);
+ rtc_write_msg = NULL;
+ goto bail;
+ }
+ rc = OPAL_BUSY_EVENT;
+ bail:
+ unlock(&rtc_lock);
+ if (msg)
+ fsp_freemsg(msg);
+ return rc;
+}
+
+static void rtc_flush_cached_tod(void)
+{
+ struct fsp_msg *msg;
+ uint64_t h_m_s_m;
+ uint32_t y_m_d;
+
+ if (fsp_rtc_get_cached_tod(&y_m_d, &h_m_s_m))
+ return;
+ msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, y_m_d,
+ (h_m_s_m >> 32) & 0xffffff00, 0);
+ if (msg)
+ fsp_queue_msg(msg, fsp_freemsg);
+}
+
+static bool fsp_rtc_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+	bool rc = false;
+
+	assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ lock(&rtc_lock);
+ fsp_in_reset = true;
+ unlock(&rtc_lock);
+ rc = true;
+ break;
+ case FSP_RELOAD_COMPLETE:
+ lock(&rtc_lock);
+ fsp_in_reset = false;
+ if (rtc_tod_cache.dirty) {
+ rtc_flush_cached_tod();
+ rtc_tod_cache.dirty = false;
+ }
+ unlock(&rtc_lock);
+ rc = true;
+ break;
+ }
+
+ return rc;
+}
+
+static struct fsp_client fsp_rtc_client_rr = {
+ .message = fsp_rtc_msg_rr,
+};
+
+void fsp_rtc_init(void)
+{
+ struct fsp_msg msg, resp;
+ int rc;
+
+ if (!fsp_present()) {
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+ return;
+ }
+
+ opal_register(OPAL_RTC_READ, fsp_opal_rtc_read, 2);
+ opal_register(OPAL_RTC_WRITE, fsp_opal_rtc_write, 2);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_rtc_client_rr, FSP_MCLASS_RR_EVENT);
+
+ msg.resp = &resp;
+ fsp_fillmsg(&msg, FSP_CMD_READ_TOD, 0);
+
+ DBG("Getting initial RTC TOD\n");
+
+ lock(&rtc_lock);
+
+ rc = fsp_sync_msg(&msg, false);
+
+ if (rc >= 0)
+ fsp_rtc_process_read(&resp);
+ else
+ rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
+
+ unlock(&rtc_lock);
+}
diff --git a/hw/fsp/fsp-sensor.c b/hw/fsp/fsp-sensor.c
new file mode 100644
index 0000000..f4fc19d
--- /dev/null
+++ b/hw/fsp/fsp-sensor.c
@@ -0,0 +1,788 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * Design note:
+ * This code enables the 'powernv' platform to retrieve sensor related data
+ * from the FSP using SPCN passthru mailbox commands.
+ *
+ * The OPAL read sensor API in Sapphire is implemented as an 'asynchronous'
+ * read call that returns after queuing the read request. A unique sensor-id
+ * is expected as an argument for the OPAL read call; these ids have already
+ * been exported to the device tree during FSP init. The Sapphire code
+ * decodes this id to determine the requested attribute and sensor.
+ */
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <device.h>
+#include <spcn.h>
+#include <opal-msg.h>
+#include<fsp-elog.h>
+
+//#define DBG(fmt...) printf("SENSOR: " fmt)
+#define DBG(fmt...) do { } while (0)
+
+#define SENSOR_PREFIX "sensor: "
+#define INVALID_DATA ((uint32_t)-1)
+
+/* Entry size of PRS command modifiers */
+#define PRS_STATUS_ENTRY_SZ 0x08
+#define SENSOR_PARAM_ENTRY_SZ 0x10
+#define SENSOR_DATA_ENTRY_SZ 0x08
+#define PROC_JUNC_ENTRY_SZ 0x04
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR,
+ OPAL_MISC_SUBSYSTEM,
+ OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_READ, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR,
+ OPAL_MISC_SUBSYSTEM, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_ASYNC_COMPLETE, OPAL_PLATFORM_ERR_EVT,
+ OPAL_SENSOR, OPAL_MISC_SUBSYSTEM, OPAL_INFO,
+ OPAL_NA, NULL);
+
+/* FSP response status codes */
+enum {
+ SP_RSP_STATUS_VALID_DATA = 0x00,
+ SP_RSP_STATUS_INVALID_DATA = 0x22,
+ SP_RSP_STATUS_SPCN_ERR = 0xA8,
+ SP_RSP_STATUS_DMA_ERR = 0x24,
+};
+
+enum sensor_state {
+ SENSOR_VALID_DATA,
+ SENSOR_INVALID_DATA,
+ SENSOR_SPCN_ERROR,
+ SENSOR_DMA_ERROR,
+ SENSOR_PERMANENT_ERROR,
+ SENSOR_OPAL_ERROR,
+};
+
+enum spcn_attr {
+ /* mod 0x01, 0x02 */
+ SENSOR_PRESENT,
+ SENSOR_FAULTED,
+ SENSOR_AC_FAULTED,
+ SENSOR_ON,
+ SENSOR_ON_SUPPORTED,
+ /* mod 0x10, 0x11 */
+ SENSOR_THRS,
+ SENSOR_LOCATION,
+ /* mod 0x12, 0x13 */
+ SENSOR_DATA,
+ /* mod 0x1c */
+ SENSOR_POWER,
+
+ SENSOR_MAX,
+};
+
+/* Parsed sensor attributes, passed through OPAL */
+struct opal_sensor_data {
+ uint64_t async_token; /* Asynchronous token */
+ uint32_t *sensor_data; /* Kernel pointer to copy data */
+ enum spcn_attr spcn_attr; /* Modifier attribute */
+ uint16_t rid; /* Sensor RID */
+ uint8_t frc; /* Sensor resource class */
+	uint32_t mod_index; /* Modifier index */
+ uint32_t offset; /* Offset in sensor buffer */
+};
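+
+/*
+ * Flow sketch: the OPAL read call fills in one of these requests and
+ * queues an SPCN read (fsp_sensor_send_read_request below); on completion
+ * the parsed value is copied to the kernel's sensor_data pointer and an
+ * asynchronous completion carrying async_token is queued back to the OS
+ * (queue_msg_for_delivery).
+ */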
+
+struct spcn_mod_attr {
+ const char *name;
+ enum spcn_attr val;
+};
+
+struct spcn_mod {
+ uint8_t mod; /* Modifier code */
+ uint8_t entry_size; /* Size of each entry in response buffer */
+ uint16_t entry_count; /* Number of entries */
+ struct spcn_mod_attr *mod_attr;
+};
+
+static struct spcn_mod_attr prs_status_attrs[] = {
+ {"present", SENSOR_PRESENT},
+ {"faulted", SENSOR_FAULTED},
+ {"ac-faulted", SENSOR_AC_FAULTED},
+ {"on", SENSOR_ON},
+ {"on-supported", SENSOR_ON_SUPPORTED}
+};
+
+static struct spcn_mod_attr sensor_param_attrs[] = {
+ {"thrs", SENSOR_THRS},
+ {"loc", SENSOR_LOCATION}
+};
+
+static struct spcn_mod_attr sensor_data_attrs[] = {
+ {"data", SENSOR_DATA}
+};
+
+static struct spcn_mod_attr sensor_power_attrs[] = {
+ {"power", SENSOR_POWER}
+};
+
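+/*
+ * Each modifier family below comes as a FIRST/SUBS pair: the FIRST
+ * command starts a scan and the SUBS (subsequent) command continues
+ * it for as long as the FSP keeps answering COND_SUCCESS.
+ * entry_count is filled in at runtime as responses arrive.
+ */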
+static struct spcn_mod spcn_mod_data[] = {
+ {SPCN_MOD_PRS_STATUS_FIRST, PRS_STATUS_ENTRY_SZ, 0,
+ prs_status_attrs},
+ {SPCN_MOD_PRS_STATUS_SUBS, PRS_STATUS_ENTRY_SZ, 0,
+ prs_status_attrs},
+ {SPCN_MOD_SENSOR_PARAM_FIRST, SENSOR_PARAM_ENTRY_SZ, 0,
+ sensor_param_attrs},
+ {SPCN_MOD_SENSOR_PARAM_SUBS, SENSOR_PARAM_ENTRY_SZ, 0,
+ sensor_param_attrs},
+ {SPCN_MOD_SENSOR_DATA_FIRST, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_data_attrs},
+ {SPCN_MOD_SENSOR_DATA_SUBS, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_data_attrs},
+ /* TODO Support this modifier '0x14', if required */
+ /* {SPCN_MOD_PROC_JUNC_TEMP, PROC_JUNC_ENTRY_SZ, 0, NULL}, */
+ {SPCN_MOD_SENSOR_POWER, SENSOR_DATA_ENTRY_SZ, 0,
+ sensor_power_attrs},
+ {SPCN_MOD_LAST, 0xff, 0xffff, NULL}
+};
+
+/* Frame resource class (FRC) names */
+static const char *frc_names[] = {
+ /* 0x00 and 0x01 are reserved */
+ NULL,
+ NULL,
+ "power-controller",
+ "power-supply",
+ "regulator",
+ "cooling-fan",
+ "cooling-controller",
+ "battery-charger",
+ "battery-pack",
+ "amb-temp",
+ "temp",
+ "vrm",
+ "riser-card",
+ "io-backplane"
+};
+
+#define SENSOR_MAX_SIZE 0x00100000
+static void *sensor_buffer = NULL;
+static enum sensor_state sensor_state;	/* zero-initialized: SENSOR_VALID_DATA */
+static bool prev_msg_consumed = true;
+static struct lock sensor_lock;
+
+/* Function prototypes */
+static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr);
+static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr);
+
+
+/*
+ * Power Resource Status (PRS)
+ * Command: 0x42
+ *
+ * Modifier: 0x01
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | SRC | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x10
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | Sensor location |
+ * --------------------------------------------------------------------------
+ * --------------------------------------------------------------------------
+ * | 8 9 10 11 12 13 14 15 |
+ * --------------------------------------------------------------------------
+ * | Reserved | Reserved | Threshold | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x12
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 4 5 6 7 |
+ * --------------------------------------------------------------------------
+ * |Frame resrc class| PRID | Sensor data | Status |
+ * --------------------------------------------------------------------------
+ *
+ *
+ * Modifier: 0x14
+ * --------------------------------------------------------------------------
+ * | 0 1 2 3 |
+ * --------------------------------------------------------------------------
+ * |Enclosure Tj Avg | Chip Tj Avg | Reserved | Reserved |
+ * --------------------------------------------------------------------------
+ */
+
+static void fsp_sensor_process_data(struct opal_sensor_data *attr)
+{
+ uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer;
+ uint32_t sensor_data = INVALID_DATA;
+ uint16_t sensor_mod_data[8];
+ int count, i;
+ uint8_t valid, nr_power;
+ uint32_t power;
+
+ for (count = 0; count < spcn_mod_data[attr->mod_index].entry_count;
+ count++) {
+ memcpy((void *)sensor_mod_data, sensor_buf_ptr,
+ spcn_mod_data[attr->mod_index].entry_size);
+ if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if required */
+
+ } else if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_SENSOR_POWER) {
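+			/*
+			 * Assumed mod 0x1c entry layout (inferred from the
+			 * parsing below, not from a spec): byte 0 is a
+			 * validity/count byte (bit 7 = data valid, low
+			 * nibble = number of readings) and each reading is
+			 * 5 bytes with a 32-bit milliwatt value at offset
+			 * 2 + i * 5. Readings are summed and reported in
+			 * watts.
+			 */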
+ valid = sensor_buf_ptr[0];
+ if (valid & 0x80) {
+ nr_power = valid & 0x0f;
+ sensor_data = 0;
+				for (i = 0; i < nr_power; i++) {
+					power = *(uint32_t *)&sensor_buf_ptr[2 + i * 5];
+					DBG("Power[%d]: %d mW\n", i, power);
+					sensor_data += power / 1000;
+				}
+ }
+ } else {
+ DBG("Power Sensor data not valid\n");
+ }
+ } else if (sensor_mod_data[0] == attr->frc &&
+ sensor_mod_data[1] == attr->rid) {
+ switch (attr->spcn_attr) {
+ /* modifier 0x01, 0x02 */
+ case SENSOR_PRESENT:
+ DBG("Not exported to device tree\n");
+ break;
+ case SENSOR_FAULTED:
+ sensor_data = sensor_mod_data[3] & 0x02;
+ break;
+ case SENSOR_AC_FAULTED:
+ case SENSOR_ON:
+ case SENSOR_ON_SUPPORTED:
+ DBG("Not exported to device tree\n");
+ break;
+ /* modifier 0x10, 0x11 */
+ case SENSOR_THRS:
+ sensor_data = sensor_mod_data[6];
+ break;
+ case SENSOR_LOCATION:
+ DBG("Not exported to device tree\n");
+ break;
+ /* modifier 0x12, 0x13 */
+ case SENSOR_DATA:
+ sensor_data = sensor_mod_data[2];
+ break;
+ default:
+ break;
+ }
+
+ break;
+ }
+
+ sensor_buf_ptr += spcn_mod_data[attr->mod_index].entry_size;
+ }
+
+ *(attr->sensor_data) = sensor_data;
+ if (sensor_data == INVALID_DATA)
+ queue_msg_for_delivery(OPAL_PARTIAL, attr);
+ else
+ queue_msg_for_delivery(OPAL_SUCCESS, attr);
+}
+
+static int fsp_sensor_process_read(struct fsp_msg *resp_msg)
+{
+ uint8_t mbx_rsp_status;
+ uint32_t size = 0;
+
+ mbx_rsp_status = (resp_msg->word1 >> 8) & 0xff;
+ switch (mbx_rsp_status) {
+ case SP_RSP_STATUS_VALID_DATA:
+ sensor_state = SENSOR_VALID_DATA;
+ size = resp_msg->data.words[1] & 0xffff;
+ break;
+ case SP_RSP_STATUS_INVALID_DATA:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Received invalid data\n", __func__);
+ sensor_state = SENSOR_INVALID_DATA;
+ break;
+ case SP_RSP_STATUS_SPCN_ERR:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failure due to SPCN error\n", __func__);
+ sensor_state = SENSOR_SPCN_ERROR;
+ break;
+ case SP_RSP_STATUS_DMA_ERR:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failure due to DMA error\n", __func__);
+ sensor_state = SENSOR_DMA_ERROR;
+ break;
+ default:
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR %s: Read failed, status:0x%02X\n",
+ __func__, mbx_rsp_status);
+ sensor_state = SENSOR_INVALID_DATA;
+ break;
+ }
+
+ return size;
+}
+
+static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr)
+{
+ DBG("%s: rc:%d, data:%d\n", __func__, rc, *(attr->sensor_data));
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ attr->async_token, rc);
+ spcn_mod_data[attr->mod_index].entry_count = 0;
+ free(attr);
+ prev_msg_consumed = true;
+}
+
+static void fsp_sensor_read_complete(struct fsp_msg *msg)
+{
+ struct opal_sensor_data *attr = msg->user_data;
+ enum spcn_rsp_status status;
+ int rc, size;
+
+ DBG("Sensor read completed\n");
+
+ status = (msg->resp->data.words[1] >> 24) & 0xff;
+ size = fsp_sensor_process_read(msg->resp);
+ fsp_freemsg(msg);
+
+ lock(&sensor_lock);
+ if (sensor_state == SENSOR_VALID_DATA) {
+ spcn_mod_data[attr->mod_index].entry_count += (size /
+ spcn_mod_data[attr->mod_index].entry_size);
+ attr->offset += size;
+ /* Fetch the subsequent entries of the same modifier type */
+ if (status == SPCN_RSP_STATUS_COND_SUCCESS) {
+ switch (spcn_mod_data[attr->mod_index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ attr->mod_index++;
+ spcn_mod_data[attr->mod_index].entry_count =
+ spcn_mod_data[attr->mod_index - 1].
+ entry_count;
+ spcn_mod_data[attr->mod_index - 1].entry_count = 0;
+ break;
+ default:
+ break;
+ }
+
+ rc = fsp_sensor_send_read_request(attr);
+ if (rc != OPAL_ASYNC_COMPLETION)
+ goto err;
+ } else { /* Notify 'powernv' of read completion */
+ fsp_sensor_process_data(attr);
+ }
+ } else {
+ rc = OPAL_INTERNAL_ERROR;
+ goto err;
+ }
+ unlock(&sensor_lock);
+ return;
+err:
+ *(attr->sensor_data) = INVALID_DATA;
+ queue_msg_for_delivery(rc, attr);
+ unlock(&sensor_lock);
+ log_simple_error(&e_info(OPAL_RC_SENSOR_ASYNC_COMPLETE),
+ "SENSOR: %s: Failed to queue the "
+ "read request to fsp\n", __func__);
+}
+
+static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr)
+{
+ int rc;
+ struct fsp_msg *msg;
+ uint32_t *sensor_buf_ptr;
+ uint32_t align;
+ uint32_t cmd_header;
+
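+	/*
+	 * The SPCN passthru command word is assembled as
+	 * (modifier << 24) | (SPCN_CMD_PRS << 16) | <command data length>,
+	 * where the length half-word is only non-zero for modifiers
+	 * (such as 0x14) that carry extra command data in the buffer.
+	 */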
+ DBG("Get the data for modifier [%d]\n", spcn_mod_data[attr->mod_index].mod);
+ if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if required */
+ align = attr->offset % sizeof(*sensor_buf_ptr);
+ if (align)
+ attr->offset += (sizeof(*sensor_buf_ptr) - align);
+
+ sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer +
+ attr->offset);
+
+ /* TODO Add 8 byte command data required for mod 0x14 */
+
+ attr->offset += 8;
+
+ cmd_header = spcn_mod_data[attr->mod_index].mod << 24 |
+ SPCN_CMD_PRS << 16 | 0x0008;
+ } else {
+ cmd_header = spcn_mod_data[attr->mod_index].mod << 24 |
+ SPCN_CMD_PRS << 16;
+ }
+
+ msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0,
+ PSI_DMA_SENSOR_BUF + attr->offset);
+
+ if (!msg) {
+ prerror(SENSOR_PREFIX "%s: Failed to allocate read message"
+ "\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ msg->user_data = attr;
+ rc = fsp_queue_msg(msg, fsp_sensor_read_complete);
+ if (rc) {
+ fsp_freemsg(msg);
+ msg = NULL;
+ prerror(SENSOR_PREFIX "%s: Failed to queue read message, "
+ "%d\n", __func__, rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
+
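+/*
+ * A sensor handle packs the attribute, frame resource class and RID
+ * that create_sensor_nodes() exported to the device tree:
+ *
+ *	bits 31..24	spcn_attr (e.g. SENSOR_DATA = 0x07)
+ *	bits 23..16	frame resource class (e.g. 0x05 = cooling-fan)
+ *	bits 15..0	resource id (RID)
+ *
+ * e.g. handle 0x07050001 requests the 'data' attribute of the cooling
+ * fan with RID 0x0001.
+ */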
+static int64_t parse_sensor_id(uint32_t id, struct opal_sensor_data *attr)
+{
+ uint32_t mod, index;
+
+ attr->spcn_attr = id >> 24;
+ if (attr->spcn_attr >= SENSOR_MAX)
+ return OPAL_PARAMETER;
+
+ if (attr->spcn_attr <= SENSOR_ON_SUPPORTED)
+ mod = SPCN_MOD_PRS_STATUS_FIRST;
+ else if (attr->spcn_attr <= SENSOR_LOCATION)
+ mod = SPCN_MOD_SENSOR_PARAM_FIRST;
+ else if (attr->spcn_attr <= SENSOR_DATA)
+ mod = SPCN_MOD_SENSOR_DATA_FIRST;
+ else if (attr->spcn_attr <= SENSOR_POWER)
+ mod = SPCN_MOD_SENSOR_POWER;
+ else
+ return OPAL_PARAMETER;
+
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; index++) {
+ if (spcn_mod_data[index].mod == mod)
+ break;
+ }
+
+ attr->mod_index = index;
+ attr->frc = (id >> 16) & 0xff;
+ attr->rid = id & 0xffff;
+
+ return 0;
+}
+
+
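+/*
+ * OPAL entry point for sensor reads. A successful call returns
+ * OPAL_ASYNC_COMPLETION; the result is copied into *sensor_data later
+ * and the caller's token is completed with an OPAL_MSG_ASYNC_COMP
+ * message (see queue_msg_for_delivery()). Only one request is handled
+ * at a time: concurrent callers get OPAL_BUSY_EVENT and must retry.
+ */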
+static int64_t fsp_opal_read_sensor(uint32_t sensor_hndl, int token,
+ uint32_t *sensor_data)
+{
+ struct opal_sensor_data *attr;
+ int64_t rc;
+
+ DBG("fsp_opal_read_sensor [%08x]\n", sensor_hndl);
+ if (sensor_state == SENSOR_PERMANENT_ERROR) {
+ rc = OPAL_HARDWARE;
+ goto out;
+ }
+
+ if (!sensor_hndl) {
+ rc = OPAL_PARAMETER;
+ goto out;
+ }
+
+ lock(&sensor_lock);
+ if (prev_msg_consumed) {
+ attr = zalloc(sizeof(*attr));
+ if (!attr) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_INIT),
+ "SENSOR: Failed to allocate memory\n");
+ rc = OPAL_NO_MEM;
+ goto out_lock;
+ }
+
+ /* Parse the sensor id and store them to the local structure */
+ rc = parse_sensor_id(sensor_hndl, attr);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failed to parse the sensor "
+ "handle[0x%08x]\n", __func__, sensor_hndl);
+ goto out_free;
+ }
+ /* Kernel buffer pointer to copy the data later when ready */
+ attr->sensor_data = sensor_data;
+ attr->async_token = token;
+
+ rc = fsp_sensor_send_read_request(attr);
+ if (rc != OPAL_ASYNC_COMPLETION) {
+ log_simple_error(&e_info(OPAL_RC_SENSOR_READ),
+ "SENSOR: %s: Failed to queue the read "
+ "request to fsp\n", __func__);
+ goto out_free;
+ }
+
+ prev_msg_consumed = false;
+ } else {
+ rc = OPAL_BUSY_EVENT;
+ }
+
+ unlock(&sensor_lock);
+ return rc;
+
+out_free:
+ free(attr);
+out_lock:
+ unlock(&sensor_lock);
+out:
+ return rc;
+}
+
+
+#define MAX_RIDS 64
+#define MAX_NAME 64
+
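+/*
+ * Map a sensor RID to a small per-class index so that device tree
+ * nodes can be enumerated 1..N within each resource class; slots in
+ * 'prids' are handed out on a first-come, first-served basis.
+ */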
+static uint32_t get_index(uint32_t *prids, uint16_t rid)
+{
+ int index;
+
+	for (index = 0; index < MAX_RIDS && prids[index]; index++) {
+		if (prids[index] == rid)
+			return index;
+	}
+
+	/* Don't write past the end of a full table */
+	if (index < MAX_RIDS)
+		prids[index] = rid;
+	return index;
+}
+
+static void create_sensor_nodes(int index, uint16_t frc, uint16_t rid,
+ uint32_t *prids, struct dt_node *sensors)
+{
+ char name[MAX_NAME];
+ struct dt_node *fs_node;
+ uint32_t value;
+
+ switch (spcn_mod_data[index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_PRS_STATUS_SUBS:
+ switch (frc) {
+ case SENSOR_FRC_POWER_SUPPLY:
+ case SENSOR_FRC_COOLING_FAN:
+ snprintf(name, MAX_NAME, "%s#%d-%s", frc_names[frc],
+ /* Start enumeration from 1 */
+ get_index(prids, rid) + 1,
+ spcn_mod_data[index].mod_attr[1].name);
+ fs_node = dt_new(sensors, name);
+ snprintf(name, MAX_NAME, "ibm,opal-sensor-%s",
+ frc_names[frc]);
+ dt_add_property_string(fs_node, "compatible", name);
+ value = spcn_mod_data[index].mod_attr[1].val << 24 |
+ (frc & 0xff) << 16 | rid;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_SUBS:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ case SPCN_MOD_SENSOR_DATA_SUBS:
+ switch (frc) {
+ case SENSOR_FRC_POWER_SUPPLY:
+ case SENSOR_FRC_COOLING_FAN:
+ case SENSOR_FRC_AMB_TEMP:
+ snprintf(name, MAX_NAME, "%s#%d-%s", frc_names[frc],
+ /* Start enumeration from 1 */
+ get_index(prids, rid) + 1,
+ spcn_mod_data[index].mod_attr[0].name);
+ fs_node = dt_new(sensors, name);
+ snprintf(name, MAX_NAME, "ibm,opal-sensor-%s",
+ frc_names[frc]);
+ dt_add_property_string(fs_node, "compatible", name);
+ value = spcn_mod_data[index].mod_attr[0].val << 24 |
+ (frc & 0xff) << 16 | rid;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case SPCN_MOD_SENSOR_POWER:
+ fs_node = dt_new(sensors, "power#1-data");
+ dt_add_property_string(fs_node, "compatible", "ibm,opal-sensor-power");
+ value = spcn_mod_data[index].mod_attr[0].val << 24;
+ dt_add_property_cells(fs_node, "sensor-id", value);
+ break;
+ }
+}
+
+static void add_sensor_ids(struct dt_node *sensors)
+{
+ uint32_t MAX_FRC_NAMES = sizeof(frc_names) / sizeof(*frc_names);
+ uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer;
+ uint32_t frc_rids[MAX_FRC_NAMES][MAX_RIDS];
+ uint16_t sensor_frc, power_rid;
+ uint16_t sensor_mod_data[8];
+ int index, count;
+
+ memset(frc_rids, 0, sizeof(frc_rids));
+
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; index++) {
+ if (spcn_mod_data[index].mod == SPCN_MOD_SENSOR_POWER) {
+			create_sensor_nodes(index, 0, 0, NULL, sensors);
+ continue;
+ }
+ for (count = 0; count < spcn_mod_data[index].entry_count;
+ count++) {
+ if (spcn_mod_data[index].mod ==
+ SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier '0x14', if
+ * required */
+ } else {
+ memcpy((void *)sensor_mod_data, sensor_buf_ptr,
+ spcn_mod_data[index].entry_size);
+ sensor_frc = sensor_mod_data[0];
+ power_rid = sensor_mod_data[1];
+
+ if (sensor_frc < MAX_FRC_NAMES &&
+ frc_names[sensor_frc])
+ create_sensor_nodes(index, sensor_frc,
+ power_rid,
+ frc_rids[sensor_frc],
+ sensors);
+ }
+
+ sensor_buf_ptr += spcn_mod_data[index].entry_size;
+ }
+ }
+}
+
+static void add_opal_sensor_node(void)
+{
+ int index;
+ struct dt_node *sensors;
+
+ if (!fsp_present())
+ return;
+
+ sensors = dt_new(opal_node, "sensors");
+
+ add_sensor_ids(sensors);
+
+ /* Reset the entry count of each modifier */
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST;
+ index++)
+ spcn_mod_data[index].entry_count = 0;
+}
+
+void fsp_init_sensor(void)
+{
+ uint32_t cmd_header, align, size, psi_dma_offset = 0;
+ enum spcn_rsp_status status;
+ uint32_t *sensor_buf_ptr;
+ struct fsp_msg msg, resp;
+ int index, rc;
+
+ if (!fsp_present()) {
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ return;
+ }
+
+ sensor_buffer = memalign(TCE_PSIZE, SENSOR_MAX_SIZE);
+ if (!sensor_buffer) {
+ prerror("FSP: could not allocate sensor_buffer!\n");
+ return;
+ }
+
+ /* Map TCE */
+ fsp_tce_map(PSI_DMA_SENSOR_BUF, sensor_buffer, PSI_DMA_SENSOR_BUF_SZ);
+
+ /* Register OPAL interface */
+ opal_register(OPAL_SENSOR_READ, fsp_opal_read_sensor, 3);
+
+ msg.resp = &resp;
+
+	/* Walk all the modifiers to discover the sensors available
+	 * in the system */
+ for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST &&
+ sensor_state == SENSOR_VALID_DATA;) {
+ DBG("Get the data for modifier [%d]\n", spcn_mod_data[index].mod);
+ if (spcn_mod_data[index].mod == SPCN_MOD_PROC_JUNC_TEMP) {
+ /* TODO Support this modifier 0x14, if required */
+ align = psi_dma_offset % sizeof(*sensor_buf_ptr);
+ if (align)
+ psi_dma_offset += (sizeof(*sensor_buf_ptr) - align);
+
+ sensor_buf_ptr = (uint32_t *)((uint8_t *)sensor_buffer
+ + psi_dma_offset);
+
+ /* TODO Add 8 byte command data required for mod 0x14 */
+ psi_dma_offset += 8;
+
+ cmd_header = spcn_mod_data[index].mod << 24 |
+ SPCN_CMD_PRS << 16 | 0x0008;
+ } else {
+ cmd_header = spcn_mod_data[index].mod << 24 |
+ SPCN_CMD_PRS << 16;
+ }
+
+ fsp_fillmsg(&msg, FSP_CMD_SPCN_PASSTHRU, 4,
+ SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0,
+ PSI_DMA_SENSOR_BUF + psi_dma_offset);
+
+ rc = fsp_sync_msg(&msg, false);
+ if (rc >= 0) {
+ status = (resp.data.words[1] >> 24) & 0xff;
+ size = fsp_sensor_process_read(&resp);
+ psi_dma_offset += size;
+ spcn_mod_data[index].entry_count += (size /
+ spcn_mod_data[index].entry_size);
+ } else {
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ break;
+ }
+
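+		/*
+		 * A COND_SUCCESS status means more entries of this type
+		 * remain: step from the FIRST modifier to its SUBS
+		 * (subsequent) twin and keep issuing it until the status
+		 * changes. Otherwise, skip past the SUBS entry to the
+		 * next modifier family.
+		 */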
+ switch (spcn_mod_data[index].mod) {
+ case SPCN_MOD_PRS_STATUS_FIRST:
+ case SPCN_MOD_SENSOR_PARAM_FIRST:
+ case SPCN_MOD_SENSOR_DATA_FIRST:
+ if (status == SPCN_RSP_STATUS_COND_SUCCESS)
+ index++;
+ else
+ index += 2;
+
+ break;
+ case SPCN_MOD_PRS_STATUS_SUBS:
+ case SPCN_MOD_SENSOR_PARAM_SUBS:
+ case SPCN_MOD_SENSOR_DATA_SUBS:
+ if (status != SPCN_RSP_STATUS_COND_SUCCESS)
+ index++;
+ break;
+		case SPCN_MOD_SENSOR_POWER:
+			index++;
+			break;
+ default:
+ break;
+ }
+ }
+
+ if (sensor_state != SENSOR_VALID_DATA)
+ sensor_state = SENSOR_PERMANENT_ERROR;
+ else
+ add_opal_sensor_node();
+}
diff --git a/hw/fsp/fsp-surveillance.c b/hw/fsp/fsp-surveillance.c
new file mode 100644
index 0000000..c1d19b6
--- /dev/null
+++ b/hw/fsp/fsp-surveillance.c
@@ -0,0 +1,209 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <lock.h>
+#include <processor.h>
+#include <timebase.h>
+#include <fsp-sysparam.h>
+#include <fsp-elog.h>
+
+static bool fsp_surv_state = false;
+static bool fsp_surv_ack_pending = false;
+static u64 surv_timer;
+static u64 surv_ack_timer;
+static u32 surv_state_param;
+static struct lock surv_lock = LOCK_UNLOCKED;
+
+#define FSP_SURV_ACK_TIMEOUT 120 /* surv ack timeout in seconds */
+
+DEFINE_LOG_ENTRY(OPAL_RC_SURVE_INIT, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE,
+ OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_MISCELLANEOUS_INFO_ONLY, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SURVE_STATUS, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE,
+ OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_MISCELLANEOUS_INFO_ONLY, NULL);
+
+static void fsp_surv_ack(struct fsp_msg *msg)
+{
+ uint8_t val;
+
+ if (!msg->resp)
+ return;
+
+ val = (msg->resp->word1 >> 8) & 0xff;
+ if (val == 0) {
+ /* reset the pending flag */
+		printf("SURV: Received heartbeat acknowledgement from FSP\n");
+ lock(&surv_lock);
+ fsp_surv_ack_pending = false;
+ unlock(&surv_lock);
+ } else
+ prerror("SURV: Heartbeat Acknowledgment error from FSP\n");
+
+ fsp_freemsg(msg);
+}
+
+static void fsp_surv_check_timeout(void)
+{
+ u64 now = mftb();
+
+ /*
+ * We just checked fsp_surv_ack_pending to be true in fsp_surv_hbeat
+ * and we haven't dropped the surv_lock between then and now. So, we
+ * just go ahead and check timeouts.
+ */
+ if (tb_compare(now, surv_ack_timer) == TB_AAFTERB) {
+ /* XXX: We should be logging a PEL to the host, assuming
+ * the FSP is dead, pending a R/R.
+ */
+ prerror("SURV: [%16llx] Surv ACK timed out; initiating R/R\n",
+ now);
+
+ /* Reset the pending trigger too */
+ fsp_surv_ack_pending = false;
+ fsp_trigger_reset();
+ }
+
+ return;
+}
+
+/* Send surveillance heartbeat based on a timebase trigger */
+static void fsp_surv_hbeat(void)
+{
+ u64 now = mftb();
+
+ /* Check if an ack is pending... if so, don't send the ping just yet */
+ if (fsp_surv_ack_pending) {
+ fsp_surv_check_timeout();
+ return;
+ }
+
+ /* add timebase callbacks */
+	/*
+	 * XXX This packet needs to be pushed to the FSP at an interval
+	 * shorter than the 120s advertised to the FSP.
+	 *
+	 * Verify if the command building format and call is fine.
+	 */
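+	/*
+	 * The heartbeat advertises a 120s surveillance timeout to the
+	 * FSP but is re-sent every 60s, presumably so that a single
+	 * delayed exchange doesn't trip the FSP-side timer; our own ack
+	 * timeout (FSP_SURV_ACK_TIMEOUT) matches the 120s window.
+	 */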
+ if (surv_timer == 0 ||
+ (tb_compare(now, surv_timer) == TB_AAFTERB) ||
+ (tb_compare(now, surv_timer) == TB_AEQUALB)) {
+		printf("SURV: [%16llx] Sending the heartbeat command to FSP\n",
+ now);
+ fsp_queue_msg(fsp_mkmsg(FSP_CMD_SURV_HBEAT, 1, 120),
+ fsp_surv_ack);
+
+ fsp_surv_ack_pending = true;
+ surv_timer = now + secs_to_tb(60);
+ surv_ack_timer = now + secs_to_tb(FSP_SURV_ACK_TIMEOUT);
+ }
+}
+
+static void fsp_surv_poll(void *data __unused)
+{
+ if (!fsp_surv_state)
+ return;
+ lock(&surv_lock);
+ fsp_surv_hbeat();
+ unlock(&surv_lock);
+}
+
+static void fsp_surv_got_param(uint32_t param_id __unused, int err_len,
+ void *data __unused)
+{
+ if (err_len != 4) {
+ log_simple_error(&e_info(OPAL_RC_SURVE_STATUS),
+			"SURV: Error retrieving surveillance status: %d\n",
+ err_len);
+ return;
+ }
+
+ printf("SURV: Status from FSP: %d\n", surv_state_param);
+ if (!(surv_state_param & 0x01))
+ return;
+
+ lock(&surv_lock);
+ fsp_surv_state = true;
+
+ /* Also send one heartbeat now. The next one will not happen
+ * until we hit the OS.
+ */
+ fsp_surv_hbeat();
+ unlock(&surv_lock);
+}
+
+void fsp_surv_query(void)
+{
+ int rc;
+
+ printf("SURV: Querying FSP's surveillance status\n");
+
+ /* Reset surveillance settings */
+ lock(&surv_lock);
+ fsp_surv_state = false;
+ surv_timer = 0;
+ surv_ack_timer = 0;
+ unlock(&surv_lock);
+
+	/* Query FSP for surveillance state */
+ rc = fsp_get_sys_param(SYS_PARAM_SURV, &surv_state_param, 4,
+ fsp_surv_got_param, NULL);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SURVE_INIT),
+ "SURV: Error %d queueing param request\n", rc);
+ }
+}
+
+static bool fsp_surv_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ assert(msg == NULL);
+
+ switch (cmd_sub_mod) {
+ case FSP_RESET_START:
+ printf("SURV: Disabling surveillance\n");
+ fsp_surv_state = false;
+ fsp_surv_ack_pending = false;
+ return true;
+ case FSP_RELOAD_COMPLETE:
+ fsp_surv_query();
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_surv_client_rr = {
+ .message = fsp_surv_msg_rr,
+};
+
+/* This is called at boot time */
+void fsp_init_surveillance(void)
+{
+ /* Always register the poller, so we don't have to add/remove
+ * it on reset-reload or change of surveillance state. Also the
+ * poller list has no locking so we don't want to play with it
+ * at runtime.
+ */
+ opal_add_poller(fsp_surv_poll, NULL);
+
+ /* Register for the reset/reload event */
+ fsp_register_client(&fsp_surv_client_rr, FSP_MCLASS_RR_EVENT);
+
+ /* Send query to FSP */
+ fsp_surv_query();
+}
diff --git a/hw/fsp/fsp-sysparam.c b/hw/fsp/fsp-sysparam.c
new file mode 100644
index 0000000..e9e5b16
--- /dev/null
+++ b/hw/fsp/fsp-sysparam.c
@@ -0,0 +1,454 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <fsp.h>
+#include <opal.h>
+#include <device.h>
+#include <lock.h>
+#include <processor.h>
+#include <psi.h>
+#include <opal-msg.h>
+#include <fsp-sysparam.h>
+
+struct sysparam_comp_data {
+ uint32_t param_len;
+ uint64_t async_token;
+};
+
+struct sysparam_req {
+ sysparam_compl_t completion;
+ void *comp_data;
+ void *ubuf;
+ uint32_t ulen;
+ struct fsp_msg msg;
+ struct fsp_msg resp;
+ bool done;
+};
+
+static struct sysparam_attr {
+ const char *name;
+ uint32_t id;
+ uint32_t length;
+ uint8_t perm;
+} sysparam_attrs[] = {
+#define _R OPAL_SYSPARAM_READ
+#define _W OPAL_SYSPARAM_WRITE
+#define _RW OPAL_SYSPARAM_RW
+ {"surveillance", SYS_PARAM_SURV, 4, _RW},
+ {"hmc-management", SYS_PARAM_HMC_MANAGED, 4, _R},
+ {"cupd-policy", SYS_PARAM_FLASH_POLICY, 4, _RW},
+ {"plat-hmc-managed", SYS_PARAM_NEED_HMC, 4, _RW},
+ {"fw-license-policy", SYS_PARAM_FW_LICENSE, 4, _RW},
+ {"world-wide-port-num", SYS_PARAM_WWPN, 12, _W},
+ {"default-boot-device", SYS_PARAM_DEF_BOOT_DEV, 1, _RW},
+ {"next-boot-device", SYS_PARAM_NEXT_BOOT_DEV,1, _RW}
+#undef _R
+#undef _W
+#undef _RW
+};
+
+static int fsp_sysparam_process(struct sysparam_req *r)
+{
+ u32 param_id, len;
+ int stlen = 0;
+ u8 fstat;
+ /* Snapshot completion before we set the "done" flag */
+ sysparam_compl_t comp = r->completion;
+ void *cdata = r->comp_data;
+
+ if (r->msg.state != fsp_msg_done) {
+ prerror("FSP: Request for sysparam 0x%x got FSP failure!\n",
+ r->msg.data.words[0]);
+ stlen = -1; /* XXX Find saner error codes */
+ goto complete;
+ }
+
+ param_id = r->resp.data.words[0];
+ len = r->resp.data.words[1] & 0xffff;
+
+ /* Check params validity */
+ if (param_id != r->msg.data.words[0]) {
+ prerror("FSP: Request for sysparam 0x%x got resp. for 0x%x!\n",
+ r->msg.data.words[0], param_id);
+ stlen = -2; /* XXX Sane error codes */
+ goto complete;
+ }
+ if (len > r->ulen) {
+ prerror("FSP: Request for sysparam 0x%x truncated!\n",
+ param_id);
+ len = r->ulen;
+ }
+
+ /* Decode the request status */
+ fstat = (r->msg.resp->word1 >> 8) & 0xff;
+ switch(fstat) {
+ case 0x00: /* XXX Is that even possible ? */
+ case 0x11: /* Data in request */
+ memcpy(r->ubuf, &r->resp.data.words[2], len);
+		/* Fall through */
+ case 0x12: /* Data in TCE */
+ stlen = len;
+ break;
+ default:
+ stlen = -fstat;
+ }
+ complete:
+ /* Call completion if any */
+ if (comp)
+ comp(r->msg.data.words[0], stlen, cdata);
+
+ free(r);
+
+ return stlen;
+}
+
+static void fsp_sysparam_get_complete(struct fsp_msg *msg)
+{
+ struct sysparam_req *r = container_of(msg, struct sysparam_req, msg);
+
+ /* If it's an asynchronous request, process it now */
+ if (r->completion) {
+ fsp_sysparam_process(r);
+ return;
+ }
+
+ /* Else just set the done flag */
+
+ /* Another CPU can be polling on the "done" flag without the
+	 * lock held, so let's order the updates to the structure
+ */
+ lwsync();
+ r->done = true;
+}
+
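+/*
+ * Usage sketch (hypothetical caller): with no completion callback the
+ * call is synchronous, spinning in fsp_poll() and returning the number
+ * of bytes fetched (or a negative error):
+ *
+ *	uint32_t val;
+ *	int len = fsp_get_sys_param(SYS_PARAM_SURV, &val, 4, NULL, NULL);
+ */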
+int fsp_get_sys_param(uint32_t param_id, void *buffer, uint32_t length,
+ sysparam_compl_t async_complete, void *comp_data)
+{
+ struct sysparam_req *r;
+ uint64_t baddr, tce_token;
+ int rc;
+
+ if (!fsp_present())
+ return -ENODEV;
+ /*
+ * XXX FIXME: We currently always allocate the sysparam_req here
+ * however, we want to avoid runtime allocations as much as
+ * possible, so if this is going to be used a lot at runtime,
+ * we probably want to pre-allocate a pool of these
+ */
+	if (length > 4096)
+		return -EINVAL;
+	r = zalloc(sizeof(struct sysparam_req));
+	if (!r)
+		return -ENOMEM;
+ r->completion = async_complete;
+ r->comp_data = comp_data;
+ r->done = false;
+ r->ubuf = buffer;
+ r->ulen = length;
+ r->msg.resp = &r->resp;
+
+ /* Map always 1 page ... easier that way and none of that
+ * is performance critical
+ */
+ baddr = (uint64_t)buffer;
+ fsp_tce_map(PSI_DMA_GET_SYSPARAM, (void *)(baddr & ~0xffful), 0x1000);
+ tce_token = PSI_DMA_GET_SYSPARAM | (baddr & 0xfff);
+ fsp_fillmsg(&r->msg, FSP_CMD_QUERY_SPARM, 3,
+ param_id, length, tce_token);
+ rc = fsp_queue_msg(&r->msg, fsp_sysparam_get_complete);
+
+ /* Asynchronous operation or queueing failure, return */
+ if (rc || async_complete)
+ return rc;
+
+ /* Synchronous operation requested, spin and process */
+ while(!r->done)
+ fsp_poll();
+
+ /* Will free the request */
+ return fsp_sysparam_process(r);
+}
+
+static void fsp_opal_getparam_complete(uint32_t param_id __unused, int err_len,
+ void *data)
+{
+ struct sysparam_comp_data *comp_data = data;
+ int rc = OPAL_SUCCESS;
+
+ if (comp_data->param_len != err_len)
+ rc = OPAL_INTERNAL_ERROR;
+
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ comp_data->async_token, rc);
+ free(comp_data);
+}
+
+static void fsp_opal_setparam_complete(struct fsp_msg *msg)
+{
+ struct sysparam_comp_data *comp_data = msg->user_data;
+ u8 fstat;
+ uint32_t param_id;
+ int rc = OPAL_SUCCESS;
+
+ if (msg->state != fsp_msg_done) {
+ prerror("FSP: Request for set sysparam 0x%x got FSP failure!\n",
+ msg->data.words[0]);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ param_id = msg->resp->data.words[0];
+ if (param_id != msg->data.words[0]) {
+ prerror("FSP: Request for set sysparam 0x%x got resp. for 0x%x!"
+ "\n", msg->data.words[0], param_id);
+ rc = OPAL_INTERNAL_ERROR;
+ goto out;
+ }
+
+ fstat = (msg->resp->word1 >> 8) & 0xff;
+ switch (fstat) {
+ case 0x00:
+ rc = OPAL_SUCCESS;
+ break;
+ case 0x22:
+ prerror("%s: Response status 0x%x, invalid data\n", __func__,
+ fstat);
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ case 0x24:
+ prerror("%s: Response status 0x%x, DMA error\n", __func__,
+ fstat);
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ default:
+ rc = OPAL_INTERNAL_ERROR;
+ break;
+ }
+
+out:
+ opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ comp_data->async_token, rc);
+ free(comp_data);
+ fsp_freemsg(msg);
+}
+
+/* OPAL interface for PowerNV to read the system parameter from FSP */
+static int64_t fsp_opal_get_param(uint64_t async_token, uint32_t param_id,
+ uint64_t buffer, uint64_t length)
+{
+ struct sysparam_comp_data *comp_data;
+ int count, rc, i;
+
+ if (!fsp_present())
+ return OPAL_HARDWARE;
+
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ if (sysparam_attrs[i].id == param_id)
+ break;
+ if (i == count)
+ return OPAL_PARAMETER;
+
+ if (length < sysparam_attrs[i].length)
+ return OPAL_PARAMETER;
+ if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_READ))
+ return OPAL_PERMISSION;
+
+ comp_data = zalloc(sizeof(struct sysparam_comp_data));
+ if (!comp_data)
+ return OPAL_NO_MEM;
+
+ comp_data->param_len = sysparam_attrs[i].length;
+ comp_data->async_token = async_token;
+ rc = fsp_get_sys_param(param_id, (void *)buffer,
+ sysparam_attrs[i].length, fsp_opal_getparam_complete,
+ comp_data);
+ if (rc) {
+ free(comp_data);
+ prerror("%s: Error %d queuing param request\n", __func__, rc);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
+
+/* OPAL interface for PowerNV to update the system parameter to FSP */
+static int64_t fsp_opal_set_param(uint64_t async_token, uint32_t param_id,
+ uint64_t buffer, uint64_t length)
+{
+ struct sysparam_comp_data *comp_data;
+ struct fsp_msg *msg;
+ uint64_t tce_token;
+ int count, rc, i;
+
+ if (!fsp_present())
+ return OPAL_HARDWARE;
+
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ if (sysparam_attrs[i].id == param_id)
+ break;
+ if (i == count)
+ return OPAL_PARAMETER;
+
+ if (length < sysparam_attrs[i].length)
+ return OPAL_PARAMETER;
+ if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_WRITE))
+ return OPAL_PERMISSION;
+
+ fsp_tce_map(PSI_DMA_SET_SYSPARAM, (void *)(buffer & ~0xffful), 0x1000);
+ tce_token = PSI_DMA_SET_SYSPARAM | (buffer & 0xfff);
+
+ msg = fsp_mkmsg(FSP_CMD_SET_SPARM_2, 4, param_id, length,
+ tce_token >> 32, tce_token);
+ if (!msg) {
+ prerror("%s: Failed to allocate the message\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ comp_data = zalloc(sizeof(struct sysparam_comp_data));
+ if (!comp_data)
+ return OPAL_NO_MEM;
+
+ comp_data->param_len = length;
+ comp_data->async_token = async_token;
+ msg->user_data = comp_data;
+
+ rc = fsp_queue_msg(msg, fsp_opal_setparam_complete);
+ if (rc) {
+ free(comp_data);
+ fsp_freemsg(msg);
+ prerror("%s: Failed to queue the message\n", __func__);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ return OPAL_ASYNC_COMPLETION;
+}
+
+static bool fsp_sysparam_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ struct fsp_msg *rsp;
+ int rc = -ENOMEM;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_SP_SPARM_UPD_0:
+ case FSP_CMD_SP_SPARM_UPD_1:
+ printf("FSP: Got sysparam update, param ID 0x%x\n",
+ msg->data.words[0]);
+ rsp = fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008000, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+ if (rc) {
+ prerror("FSP: Error %d queuing sysparam reply\n", rc);
+ /* What to do here ? R/R ? */
+			if (rsp)
+				fsp_freemsg(rsp);
+ }
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_sysparam_client = {
+ .message = fsp_sysparam_msg,
+};
+
+static void add_opal_sysparam_node(void)
+{
+ struct dt_node *sysparams;
+ char *names, *s;
+ uint32_t *ids, *lens;
+ uint8_t *perms;
+ unsigned int i, count, size = 0;
+
+ if (!fsp_present())
+ return;
+
+ sysparams = dt_new(opal_node, "sysparams");
+ dt_add_property_string(sysparams, "compatible", "ibm,opal-sysparams");
+
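+	/*
+	 * The attributes are exported as four parallel arrays:
+	 * "param-name" is a packed list of NUL-terminated strings,
+	 * while "param-id", "param-len" and "param-perm" carry one
+	 * entry per parameter at the matching index.
+	 */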
+ count = ARRAY_SIZE(sysparam_attrs);
+ for (i = 0; i < count; i++)
+ size = size + strlen(sysparam_attrs[i].name) + 1;
+
+ names = zalloc(size);
+ if (!names) {
+ prerror("%s: Failed to allocate memory for parameter names\n",
+ __func__);
+ return;
+ }
+
+ ids = zalloc(count * sizeof(*ids));
+ if (!ids) {
+ prerror("%s: Failed to allocate memory for parameter ids\n",
+ __func__);
+ goto out_free_name;
+ }
+
+ lens = zalloc(count * sizeof(*lens));
+ if (!lens) {
+ prerror("%s: Failed to allocate memory for parameter length\n",
+ __func__);
+ goto out_free_id;
+ }
+
+ perms = zalloc(count * sizeof(*perms));
+ if (!perms) {
+		prerror("%s: Failed to allocate memory for parameter permissions\n",
+ __func__);
+ goto out_free_len;
+ }
+
+ s = names;
+ for (i = 0; i < count; i++) {
+ strcpy(s, sysparam_attrs[i].name);
+ s = s + strlen(sysparam_attrs[i].name) + 1;
+
+ ids[i] = sysparam_attrs[i].id;
+ lens[i] = sysparam_attrs[i].length;
+ perms[i] = sysparam_attrs[i].perm;
+ }
+
+ dt_add_property(sysparams, "param-name", names, size);
+ dt_add_property(sysparams, "param-id", ids, count * sizeof(*ids));
+ dt_add_property(sysparams, "param-len", lens, count * sizeof(*lens));
+ dt_add_property(sysparams, "param-perm", perms, count * sizeof(*perms));
+
+ free(perms);
+
+out_free_len:
+ free(lens);
+out_free_id:
+ free(ids);
+out_free_name:
+ free(names);
+}
+
+void fsp_sysparam_init(void)
+{
+ if (!fsp_present())
+ return;
+
+ /* Register change notifications */
+ fsp_register_client(&fsp_sysparam_client, FSP_MCLASS_SERVICE);
+
+ /* Register OPAL interfaces */
+ opal_register(OPAL_GET_PARAM, fsp_opal_get_param, 4);
+ opal_register(OPAL_SET_PARAM, fsp_opal_set_param, 4);
+
+ /* Add device-tree nodes */
+ add_opal_sysparam_node();
+}
diff --git a/hw/fsp/fsp.c b/hw/fsp/fsp.c
new file mode 100644
index 0000000..5dc298a
--- /dev/null
+++ b/hw/fsp/fsp.c
@@ -0,0 +1,2147 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Service Processor handling code
+ *
+ * XXX This mixes PSI and FSP and currently only supports
+ * P7/P7+ PSI and FSP1
+ *
+ * If we are going to support P8 PSI and FSP2, we probably want
+ * to split the PSI support from the FSP support proper first.
+ */
+#include <stdarg.h>
+#include <processor.h>
+#include <io.h>
+#include <fsp.h>
+#include <lock.h>
+#include <interrupts.h>
+#include <gx.h>
+#include <device.h>
+#include <trace.h>
+#include <timebase.h>
+#include <cpu.h>
+#include <fsp-elog.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_FSP_POLL_TIMEOUT, OPAL_PLATFORM_ERR_EVT, OPAL_FSP,
+ OPAL_PLATFORM_FIRMWARE, OPAL_ERROR_PANIC, OPAL_NA, NULL);
+
+//#define DBG(fmt...) printf(fmt)
+#define DBG(fmt...) do { } while(0)
+#define FSP_TRACE_MSG
+#define FSP_TRACE_EVENT
+
+#define FSP_MAX_IOPATH 4
+
+enum fsp_path_state {
+ fsp_path_bad,
+ fsp_path_backup,
+ fsp_path_active,
+};
+
+struct fsp_iopath {
+ enum fsp_path_state state;
+ void *fsp_regs;
+ struct psi *psi;
+};
+
+enum fsp_mbx_state {
+ fsp_mbx_idle, /* Mailbox ready to send */
+ fsp_mbx_send, /* Mailbox sent, waiting for ack */
+ fsp_mbx_crit_op, /* Critical operation in progress */
+ fsp_mbx_prep_for_reset, /* Prepare for reset sent */
+ fsp_mbx_err, /* Mailbox in error state, waiting for r&r */
+ fsp_mbx_rr, /* Mailbox in r&r */
+};
+
+struct fsp {
+ struct fsp *link;
+ unsigned int index;
+ enum fsp_mbx_state state;
+ struct fsp_msg *pending;
+
+ unsigned int iopath_count;
+ int active_iopath; /* -1: no active IO path */
+ struct fsp_iopath iopath[FSP_MAX_IOPATH];
+};
+
+static struct fsp *first_fsp;
+static struct fsp *active_fsp;
+static u16 fsp_curseq = 0x8000;
+static u64 *fsp_tce_table;
+
+#define FSP_INBOUND_SIZE 0x00100000UL
+static void *fsp_inbound_buf = NULL;
+static u32 fsp_inbound_off;
+
+static struct lock fsp_lock = LOCK_UNLOCKED;
+
+static u64 fsp_cmdclass_resp_bitmask;
+static u64 timeout_timer;
+
+static u64 fsp_hir_timeout;
+
+#define FSP_CRITICAL_OP_TIMEOUT 128
+#define FSP_DRCR_CLEAR_TIMEOUT 128
+
+/*
+ * We keep track of the last logged values for some things, to print only
+ * on value changes, but also to relieve pressure on the tracer, which
+ * doesn't do a very good job at detecting repeats when called from
+ * many different CPUs
+ */
+static u32 disr_last_print;
+static u32 drcr_last_print;
+static u32 hstate_last_print;
+
+void fsp_handle_resp(struct fsp_msg *msg);
+
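+/*
+ * Messages are serialized per FSP command class: each class has its
+ * own queue and 'busy' flag, and only one message per class is in
+ * flight at a time (see fsp_poke_queue()). Messages queued while a
+ * reset/reload is pending land on the class's rr_queue instead.
+ */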
+struct fsp_cmdclass {
+ int timeout;
+ bool busy;
+ struct list_head msgq;
+ struct list_head clientq;
+ struct list_head rr_queue; /* To queue up msgs during R/R */
+ u64 timesent;
+};
+
+static struct fsp_cmdclass fsp_cmdclass_rr;
+
+static struct fsp_cmdclass fsp_cmdclass[FSP_MCLASS_LAST - FSP_MCLASS_FIRST + 1]
+= {
+#define DEF_CLASS(_cl, _to) [_cl - FSP_MCLASS_FIRST] = { .timeout = _to }
+ DEF_CLASS(FSP_MCLASS_SERVICE, 16),
+ DEF_CLASS(FSP_MCLASS_PCTRL_MSG, 16),
+ DEF_CLASS(FSP_MCLASS_PCTRL_ABORTS, 16),
+ DEF_CLASS(FSP_MCLASS_ERR_LOG, 16),
+ DEF_CLASS(FSP_MCLASS_CODE_UPDATE, 40),
+ DEF_CLASS(FSP_MCLASS_FETCH_SPDATA, 16),
+ DEF_CLASS(FSP_MCLASS_FETCH_HVDATA, 16),
+ DEF_CLASS(FSP_MCLASS_NVRAM, 16),
+ DEF_CLASS(FSP_MCLASS_MBOX_SURV, 2),
+ DEF_CLASS(FSP_MCLASS_RTC, 16),
+ DEF_CLASS(FSP_MCLASS_SMART_CHIP, 20),
+ DEF_CLASS(FSP_MCLASS_INDICATOR, 180),
+ DEF_CLASS(FSP_MCLASS_HMC_INTFMSG, 16),
+ DEF_CLASS(FSP_MCLASS_HMC_VT, 16),
+ DEF_CLASS(FSP_MCLASS_HMC_BUFFERS, 16),
+ DEF_CLASS(FSP_MCLASS_SHARK, 16),
+ DEF_CLASS(FSP_MCLASS_MEMORY_ERR, 16),
+ DEF_CLASS(FSP_MCLASS_CUOD_EVENT, 16),
+ DEF_CLASS(FSP_MCLASS_HW_MAINT, 16),
+ DEF_CLASS(FSP_MCLASS_VIO, 16),
+ DEF_CLASS(FSP_MCLASS_SRC_MSG, 16),
+ DEF_CLASS(FSP_MCLASS_DATA_COPY, 16),
+ DEF_CLASS(FSP_MCLASS_TONE, 16),
+ DEF_CLASS(FSP_MCLASS_VIRTUAL_NVRAM, 16),
+ DEF_CLASS(FSP_MCLASS_TORRENT, 16),
+ DEF_CLASS(FSP_MCLASS_NODE_PDOWN, 16),
+ DEF_CLASS(FSP_MCLASS_DIAG, 16),
+ DEF_CLASS(FSP_MCLASS_PCIE_LINK_TOPO, 16),
+ DEF_CLASS(FSP_MCLASS_OCC, 16),
+};
+
+static void fsp_trace_msg(struct fsp_msg *msg, u8 dir __unused)
+{
+	union trace fsp __unused;
+#ifdef FSP_TRACE_MSG
+	size_t len = offsetof(struct trace_fsp_msg, data[msg->dlen]);
+#endif
+
+	/* Check the payload size before copying it into the trace entry */
+	assert(msg->dlen <= sizeof(fsp.fsp_msg.data));
+
+#ifdef FSP_TRACE_MSG
+	fsp.fsp_msg.dlen = msg->dlen;
+	fsp.fsp_msg.word0 = msg->word0;
+	fsp.fsp_msg.word1 = msg->word1;
+	fsp.fsp_msg.dir = dir;
+	memcpy(fsp.fsp_msg.data, msg->data.bytes, msg->dlen);
+	trace_add(&fsp, TRACE_FSP_MSG, len);
+#endif /* FSP_TRACE_MSG */
+}
+
+static struct fsp *fsp_get_active(void)
+{
+ /* XXX Handle transition between FSPs */
+ return active_fsp;
+}
+
+static u64 fsp_get_class_bit(u8 class)
+{
+ /* Alias classes CE and CF as the FSP has a single queue */
+ if (class == FSP_MCLASS_IPL)
+ class = FSP_MCLASS_SERVICE;
+
+ return 1ul << (class - FSP_MCLASS_FIRST);
+}
+
+static struct fsp_cmdclass *__fsp_get_cmdclass(u8 class)
+{
+ struct fsp_cmdclass *ret;
+
+ /* RR class is special */
+ if (class == FSP_MCLASS_RR_EVENT)
+ return &fsp_cmdclass_rr;
+
+ /* Bound check */
+ if (class < FSP_MCLASS_FIRST || class > FSP_MCLASS_LAST)
+ return NULL;
+
+ /* Alias classes CE and CF as the FSP has a single queue */
+ if (class == FSP_MCLASS_IPL)
+ class = FSP_MCLASS_SERVICE;
+
+ ret = &fsp_cmdclass[class - FSP_MCLASS_FIRST];
+
+ /* Unknown class */
+ if (ret->timeout == 0)
+ return NULL;
+
+ return ret;
+}
+
+static struct fsp_cmdclass *fsp_get_cmdclass(struct fsp_msg *msg)
+{
+ u8 c = msg->word0 & 0xff;
+
+ return __fsp_get_cmdclass(c);
+}
+
+static struct fsp_msg *__fsp_allocmsg(void)
+{
+ return zalloc(sizeof(struct fsp_msg));
+}
+
+struct fsp_msg *fsp_allocmsg(bool alloc_response)
+{
+ struct fsp_msg *msg;
+
+ msg = __fsp_allocmsg();
+ if (!msg)
+ return NULL;
+ if (alloc_response)
+ msg->resp = __fsp_allocmsg();
+ return msg;
+}
+
+void __fsp_freemsg(struct fsp_msg *msg)
+{
+ free(msg);
+}
+
+void fsp_freemsg(struct fsp_msg *msg)
+{
+ if (msg->resp)
+ __fsp_freemsg(msg->resp);
+ __fsp_freemsg(msg);
+}
+
+void fsp_cancelmsg(struct fsp_msg *msg)
+{
+ bool need_unlock = false;
+ struct fsp_cmdclass* cmdclass = fsp_get_cmdclass(msg);
+ struct fsp *fsp = fsp_get_active();
+
+ if (fsp->state != fsp_mbx_rr) {
+		prerror("FSP: Message cancel allowed only when "
+			"FSP is in reset\n");
+ return;
+ }
+
+ if (!cmdclass)
+ return;
+
+ /* Recursive locking */
+ need_unlock = lock_recursive(&fsp_lock);
+
+ list_del(&msg->link);
+ msg->state = fsp_msg_cancelled;
+
+ if (need_unlock)
+ unlock(&fsp_lock);
+}
+
+static void fsp_wreg(struct fsp *fsp, u32 reg, u32 val)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->active_iopath < 0)
+ return;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (iop->state == fsp_path_bad)
+ return;
+ out_be32(iop->fsp_regs + reg, val);
+}
+
+static u32 fsp_rreg(struct fsp *fsp, u32 reg)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->active_iopath < 0)
+ return 0xffffffff;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (iop->state == fsp_path_bad)
+ return 0xffffffff;
+ return in_be32(iop->fsp_regs + reg);
+}
+
+static void fsp_reg_dump(void)
+{
+#define FSP_DUMP_ONE(x) \
+ printf(" %20s: %x\n", #x, fsp_rreg(fsp, x));
+
+ struct fsp *fsp = fsp_get_active();
+
+ if (!fsp)
+ return;
+
+ printf("FSP #%d: Register dump (state=%d)\n",
+ fsp->index, fsp->state);
+ FSP_DUMP_ONE(FSP_DRCR_REG);
+ FSP_DUMP_ONE(FSP_DISR_REG);
+ FSP_DUMP_ONE(FSP_MBX1_HCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX1_FCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX2_HCTL_REG);
+ FSP_DUMP_ONE(FSP_MBX2_FCTL_REG);
+ FSP_DUMP_ONE(FSP_SDES_REG);
+ FSP_DUMP_ONE(FSP_HDES_REG);
+ FSP_DUMP_ONE(FSP_HDIR_REG);
+ FSP_DUMP_ONE(FSP_HDIM_SET_REG);
+ FSP_DUMP_ONE(FSP_PDIR_REG);
+ FSP_DUMP_ONE(FSP_PDIM_SET_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH0_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH1_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH2_REG);
+ FSP_DUMP_ONE(FSP_SCRATCH3_REG);
+}
+
+static void fsp_notify_rr_state(u32 state)
+{
+ struct fsp_client *client, *next;
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(FSP_MCLASS_RR_EVENT);
+
+ assert(cmdclass);
+ list_for_each_safe(&cmdclass->clientq, client, next, link)
+ client->message(state, NULL);
+}
+
+static void fsp_reset_cmdclass(void)
+{
+ int i;
+ struct fsp_msg *msg;
+
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+ cmdclass->busy = false;
+ cmdclass->timesent = 0;
+
+ /* We also need to reset the 'timeout' timers here */
+
+ /* Make sure the message queue is empty */
+ while(!list_empty(&cmdclass->msgq)) {
+ msg = list_pop(&cmdclass->msgq, struct fsp_msg,
+ link);
+ list_add_tail(&cmdclass->rr_queue, &msg->link);
+ }
+ }
+}
+
+static bool fsp_in_hir(struct fsp *fsp)
+{
+ switch (fsp->state) {
+ case fsp_mbx_crit_op:
+ case fsp_mbx_prep_for_reset:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsp_in_reset(struct fsp *fsp)
+{
+ switch (fsp->state) {
+ case fsp_mbx_err: /* Will be reset soon */
+ case fsp_mbx_rr: /* Already in reset */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool fsp_hir_state_timeout(void)
+{
+ u64 now = mftb();
+
+ if (tb_compare(now, fsp_hir_timeout) == TB_AAFTERB)
+ return true;
+
+ return false;
+}
+
+static void fsp_set_hir_timeout(u32 seconds)
+{
+ u64 now = mftb();
+ fsp_hir_timeout = now + secs_to_tb(seconds);
+}
+
+static bool fsp_crit_op_in_progress(struct fsp *fsp)
+{
+ u32 disr = fsp_rreg(fsp, FSP_DISR_REG);
+
+ if (disr & FSP_DISR_CRIT_OP_IN_PROGRESS)
+ return true;
+
+ return false;
+}
+
+/* Notify the FSP that it will be reset soon by writing to the DRCR */
+static void fsp_prep_for_reset(struct fsp *fsp)
+{
+ u32 drcr = fsp_rreg(fsp, FSP_DRCR_REG);
+
+ printf("FSP: Writing reset to DRCR\n");
+ drcr_last_print = drcr;
+ fsp_wreg(fsp, FSP_DRCR_REG, (drcr | FSP_PREP_FOR_RESET_CMD));
+ fsp->state = fsp_mbx_prep_for_reset;
+ fsp_set_hir_timeout(FSP_DRCR_CLEAR_TIMEOUT);
+}
+
+static void fsp_hir_poll(struct fsp *fsp, struct psi *psi)
+{
+ u32 drcr;
+
+ switch (fsp->state) {
+ case fsp_mbx_crit_op:
+ if (fsp_crit_op_in_progress(fsp)) {
+ if (fsp_hir_state_timeout())
+ prerror("FSP: Critical operation timeout\n");
+			/* XXX What to do next? Check with FSP folks */
+ } else {
+ fsp_prep_for_reset(fsp);
+ }
+ break;
+ case fsp_mbx_prep_for_reset:
+ drcr = fsp_rreg(fsp, FSP_DRCR_REG);
+
+ if (drcr != drcr_last_print) {
+ printf("FSP: DRCR changed, old = %x, new = %x\n",
+ drcr_last_print, drcr);
+ drcr_last_print = drcr;
+ }
+
+ if (drcr & FSP_DRCR_ACK_MASK) {
+ if (fsp_hir_state_timeout()) {
+ prerror("FSP: Ack timeout. Triggering reset\n");
+ psi_disable_link(psi);
+ fsp->state = fsp_mbx_err;
+ }
+ } else {
+ printf("FSP: DRCR ack received. Triggering reset\n");
+ psi_disable_link(psi);
+ fsp->state = fsp_mbx_err;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * This is the main entry for the host initiated reset case.
+ * This gets called when:
+ * a. Surveillance ack is not received in 120 seconds
+ * b. A mailbox command doesn't get a response within the stipulated time.
+ */
+static void __fsp_trigger_reset(void)
+{
+ struct fsp *fsp = fsp_get_active();
+ u32 disr;
+
+ /* Already in one of the error processing states */
+ if (fsp_in_hir(fsp) || fsp_in_reset(fsp))
+ return;
+
+ prerror("FSP: fsp_trigger_reset() entry\n");
+
+ drcr_last_print = 0;
+ /*
+ * Check if we are allowed to reset the FSP. We aren't allowed to
+ * reset the FSP if the FSP_DISR_DBG_IN_PROGRESS is set.
+ */
+ disr = fsp_rreg(fsp, FSP_DISR_REG);
+ if (disr & FSP_DISR_DBG_IN_PROGRESS) {
+ prerror("FSP: Host initiated reset disabled\n");
+ return;
+ }
+
+ /*
+ * Check if some critical operation is in progress as indicated
+ * by FSP_DISR_CRIT_OP_IN_PROGRESS. Timeout is 128 seconds
+ */
+ if (fsp_crit_op_in_progress(fsp)) {
+ printf("FSP: Critical operation in progress\n");
+ fsp->state = fsp_mbx_crit_op;
+ fsp_set_hir_timeout(FSP_CRITICAL_OP_TIMEOUT);
+ } else
+ fsp_prep_for_reset(fsp);
+}
+
+void fsp_trigger_reset(void)
+{
+ lock(&fsp_lock);
+ __fsp_trigger_reset();
+ unlock(&fsp_lock);
+}
+
+static void fsp_start_rr(struct fsp *fsp)
+{
+ struct fsp_iopath *iop;
+
+ if (fsp->state == fsp_mbx_rr)
+ return;
+
+ /* We no longer have an active path on that FSP */
+ if (fsp->active_iopath >= 0) {
+ iop = &fsp->iopath[fsp->active_iopath];
+ iop->state = fsp_path_bad;
+ fsp->active_iopath = -1;
+ }
+ fsp->state = fsp_mbx_rr;
+ disr_last_print = 0;
+ hstate_last_print = 0;
+
+ /*
+ * Mark all command classes as non-busy and clear their
+ * timeout, then flush all messages in our staging queue
+ */
+ fsp_reset_cmdclass();
+
+ /* Notify clients. We have to drop the lock here */
+ unlock(&fsp_lock);
+ fsp_notify_rr_state(FSP_RESET_START);
+ lock(&fsp_lock);
+
+ /* Start polling PSI */
+ psi_set_link_polling(true);
+}
+
+static void fsp_trace_event(struct fsp *fsp, u32 evt,
+ u32 data0, u32 data1, u32 data2, u32 data3)
+{
+ union trace tfsp __unused;
+#ifdef FSP_TRACE_EVENT
+ size_t len = sizeof(struct trace_fsp_event);
+
+ tfsp.fsp_evt.event = evt;
+ tfsp.fsp_evt.fsp_state = fsp->state;
+ tfsp.fsp_evt.data[0] = data0;
+ tfsp.fsp_evt.data[1] = data1;
+ tfsp.fsp_evt.data[2] = data2;
+ tfsp.fsp_evt.data[3] = data3;
+ trace_add(&tfsp, TRACE_FSP_EVENT, len);
+#endif /* FSP_TRACE_EVENT */
+}
+
+static void fsp_handle_errors(struct fsp *fsp)
+{
+ u32 hstate;
+ struct fsp_iopath *iop;
+ struct psi *psi;
+ u32 disr;
+
+ if (fsp->active_iopath < 0) {
+ prerror("FSP #%d: fsp_handle_errors() with no active IOP\n",
+ fsp->index);
+ return;
+ }
+
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (!iop->psi) {
+ prerror("FSP: Active IOP with no PSI link !\n");
+ return;
+ }
+ psi = iop->psi;
+
+ /*
+	 * If the link is not up, start R&R immediately. We do call
+	 * psi_disable_link() in this case because, while the link might
+	 * not be up, it might still be enabled with the PSI layer
+	 * "active" bit still set
+ */
+ if (!psi_check_link_active(psi)) {
+ /* Start R&R process */
+ fsp_trace_event(fsp, TRACE_FSP_EVT_LINK_DOWN, 0, 0, 0, 0);
+ prerror("FSP #%d: Link down, starting R&R\n", fsp->index);
+
+ /* If we got here due to a host initiated reset, the link
+ * is already driven down.
+ */
+ if (fsp->state == fsp_mbx_err)
+ psi_disable_link(psi);
+ fsp_start_rr(fsp);
+ return;
+ }
+
+ /* Link is up, check for other conditions */
+ disr = fsp_rreg(fsp, FSP_DISR_REG);
+
+	/* Log any DISR value change */
+ if (disr != disr_last_print) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_DISR_CHG, disr, 0, 0, 0);
+
+ printf("FSP #%d: DISR stat change = 0x%08x\n",
+ fsp->index, disr);
+ disr_last_print = disr;
+ }
+
+ /*
+	 * If we detect FSP_IN_RR in DISR or have a deferred mbox
+	 * error, we trigger an R&R after a bit of housekeeping to
+ * limit the chance of a stray interrupt
+ */
+ if ((disr & FSP_DISR_FSP_IN_RR) || (fsp->state == fsp_mbx_err)) {
+ /*
+		 * When Linux comes back up, we still see that bit
+		 * set for a while, so just move on, nothing to see here
+ */
+ if (fsp->state == fsp_mbx_rr)
+ return;
+
+ fsp_trace_event(fsp, TRACE_FSP_EVT_SOFT_RR, disr, 0, 0, 0);
+
+ printf("FSP #%d: FSP in reset or delayed error, starting R&R\n",
+ fsp->index);
+
+ /* Clear all interrupt conditions */
+ fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL);
+
+ /* Make sure this happened */
+ fsp_rreg(fsp, FSP_HDIR_REG);
+
+ /* Bring the PSI link down */
+ psi_disable_link(psi);
+
+ /* Start R&R process */
+ fsp_start_rr(fsp);
+ return;
+ }
+
+ /*
+	 * If we detect an R&R complete indication, acknowledge it
+ */
+ if (disr & FSP_DISR_FSP_RR_COMPLETE) {
+ /*
+		 * Acking this bit doesn't make it go away immediately, so
+ * only do it while still in R&R state
+ */
+ if (fsp->state == fsp_mbx_rr) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_RR_COMPL, 0,0,0,0);
+
+ printf("FSP #%d: Detected R&R complete, acking\n",
+ fsp->index);
+
+ /* Clear HDATA area */
+ fsp_wreg(fsp, FSP_MBX1_HDATA_AREA, 0xff);
+
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+
+ /*
+ * Mark the mbox as usable again so we can process
+ * incoming messages
+ */
+ fsp->state = fsp_mbx_idle;
+ }
+ }
+
+ /*
+ * XXX
+ *
+	 * Here we detect a number of errors; should we initiate
+	 * an R&R ?
+ */
+
+ hstate = fsp_rreg(fsp, FSP_HDES_REG);
+ if (hstate != hstate_last_print) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_HDES_CHG, hstate, 0, 0, 0);
+
+ printf("FSP #%d: HDES stat change = 0x%08x\n",
+ fsp->index, hstate);
+		hstate_last_print = hstate;
+ }
+
+ if (hstate == 0xffffffff)
+ return;
+
+ /* Clear errors */
+ fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1);
+
+ /*
+ * Most of those errors shouldn't have happened, we just clear
+ * the error state and return. In the long run, we might want
+ * to start retrying commands, switching FSPs or links, etc...
+ *
+ * We currently don't set our mailbox to a permanent error state.
+ */
+ if (hstate & FSP_DBERRSTAT_ILLEGAL1)
+ prerror("FSP #%d: Illegal command error !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_WFULL1)
+ prerror("FSP #%d: Write to a full mbox !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_REMPTY1)
+ prerror("FSP #%d: Read from an empty mbox !\n", fsp->index);
+
+ if (hstate & FSP_DBERRSTAT_PAR1)
+ prerror("FSP #%d: Parity error !\n", fsp->index);
+}
+
+/*
+ * This is called by fsp_post_msg() to check if the mbox
+ * is in a state that allows sending of a message
+ *
+ * Due to the various "interesting" contexts fsp_post_msg()
+ * can be called from, including recursive locks from lock
+ * error messages or console code, this should avoid doing
+ * anything more complex than checking a bit of state.
+ *
+ * Specifically, we cannot initiate an R&R and call back into
+ * clients etc... from this function.
+ *
+ * The best we can do is to set the mbox in error state and
+ * handle it later during a poll or interrupt.
+ */
+static bool fsp_check_can_send(struct fsp *fsp)
+{
+ struct fsp_iopath *iop;
+ struct psi *psi;
+
+ /* Look for FSP in non-idle state */
+ if (fsp->state != fsp_mbx_idle)
+ return false;
+
+ /* Look for an active IO path */
+ if (fsp->active_iopath < 0)
+ goto mbox_error;
+ iop = &fsp->iopath[fsp->active_iopath];
+ if (!iop->psi) {
+ prerror("FSP: Active IOP with no PSI link !\n");
+ goto mbox_error;
+ }
+ psi = iop->psi;
+
+ /* Check if link has gone down. This will be handled later */
+ if (!psi_check_link_active(psi)) {
+ prerror("FSP #%d: Link seems to be down on send\n", fsp->index);
+ goto mbox_error;
+ }
+
+ /* XXX Do we want to check for other error conditions ? */
+ return true;
+
+ /*
+	 * An error of some kind occurred, we'll handle it later
+ * from a more normal "poll" context
+ */
+ mbox_error:
+ fsp->state = fsp_mbx_err;
+ return false;
+}
+
+static bool fsp_post_msg(struct fsp *fsp, struct fsp_msg *msg)
+{
+ u32 ctl, reg;
+ int i, wlen;
+
+ DBG("FSP #%d: fsp_post_msg (w0: 0x%08x w1: 0x%08x)\n",
+ fsp->index, msg->word0, msg->word1);
+
+ /* Note: We used to read HCTL here and only modify some of
+ * the bits in it. This was bogus, because we would write back
+ * the incoming bits as '1' and clear them, causing fsp_poll()
+	 * to then miss them. Let's just start with 0, which I suppose
+	 * is how the HW intends us to do it.
+ */
+
+ /* Set ourselves as busy */
+ fsp->pending = msg;
+ fsp->state = fsp_mbx_send;
+ msg->state = fsp_msg_sent;
+
+ /* We trace after setting the mailbox state so that if the
+ * tracing recurses, it ends up just queuing the message up
+ */
+ fsp_trace_msg(msg, TRACE_FSP_MSG_OUT);
+
+ /* Build the message in the mailbox */
+ reg = FSP_MBX1_HDATA_AREA;
+ fsp_wreg(fsp, reg, msg->word0); reg += 4;
+ fsp_wreg(fsp, reg, msg->word1); reg += 4;
+ wlen = (msg->dlen + 3) >> 2;
+ for (i = 0; i < wlen; i++) {
+ fsp_wreg(fsp, reg, msg->data.words[i]);
+ reg += 4;
+ }
+
+ /* Write the header */
+ fsp_wreg(fsp, FSP_MBX1_HHDR0_REG, (msg->dlen + 8) << 16);
+
+ /* Write the control register */
+ ctl = 4 << FSP_MBX_CTL_HCHOST_SHIFT;
+ ctl |= (msg->dlen + 8) << FSP_MBX_CTL_DCHOST_SHIFT;
+ ctl |= FSP_MBX_CTL_PTS | FSP_MBX_CTL_SPPEND;
+ DBG(" new ctl: %08x\n", ctl);
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, ctl);
+
+ return true;
+}
+
+static void fsp_poke_queue(struct fsp_cmdclass *cmdclass)
+{
+ struct fsp *fsp = fsp_get_active();
+ struct fsp_msg *msg;
+
+ if (!fsp)
+ return;
+ if (!fsp_check_can_send(fsp))
+ return;
+
+ /* From here to the point where fsp_post_msg() sets fsp->state
+ * to !idle we must not cause any re-entrancy (no debug or trace)
+ * in a code path that may hit fsp_post_msg() (it's ok to do so
+ * if we are going to bail out), as we are committed to calling
+ * fsp_post_msg() and so a re-entrancy could cause us to do a
+ * double-send into the mailbox.
+ */
+ if (cmdclass->busy || list_empty(&cmdclass->msgq))
+ return;
+
+ msg = list_top(&cmdclass->msgq, struct fsp_msg, link);
+ assert(msg);
+ cmdclass->busy = true;
+
+ if (!fsp_post_msg(fsp, msg)) {
+ prerror("FSP #%d: Failed to send message\n", fsp->index);
+ cmdclass->busy = false;
+ return;
+ }
+}
+
+static void __fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod,
+ u8 add_words, va_list list)
+{
+ bool response = !!(cmd_sub_mod & 0x1000000);
+ u8 cmd = (cmd_sub_mod >> 16) & 0xff;
+ u8 sub = (cmd_sub_mod >> 8) & 0xff;
+ u8 mod = cmd_sub_mod & 0xff;
+ int i;
+
+ msg->word0 = cmd & 0xff;
+ msg->word1 = mod << 8 | sub;
+ msg->response = response;
+ msg->dlen = add_words << 2;
+
+ /* Note: the callers own the va_list and call va_end() */
+ for (i = 0; i < add_words; i++)
+ msg->data.words[i] = va_arg(list, unsigned int);
+
+ /* Initialize the value with false. If this ends up
+ * in fsp_sync_msg, we will set it to true.
+ */
+ msg->sync_msg = false;
+}
+
+void fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod, u8 add_words, ...)
+{
+ va_list list;
+
+ va_start(list, add_words);
+ __fsp_fillmsg(msg, cmd_sub_mod, add_words, list);
+ va_end(list);
+}
+
+struct fsp_msg *fsp_mkmsg(u32 cmd_sub_mod, u8 add_words, ...)
+{
+ struct fsp_msg *msg = fsp_allocmsg(!!(cmd_sub_mod & 0x1000000));
+ va_list list;
+
+ if (!msg) {
+ prerror("FSP: Failed to allocate struct fsp_msg\n");
+ return NULL;
+ }
+
+ va_start(list, add_words);
+ __fsp_fillmsg(msg, cmd_sub_mod, add_words, list);
+ va_end(list);
+
+ return msg;
+}
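+
+/*
+ * Illustrative example (hypothetical values): a command with class
+ * byte 0xab, sub-command 0xcd and modifier 0x00 carrying one data
+ * word would be built as
+ *
+ *     msg = fsp_mkmsg(0x00abcd00, 1, data_word);
+ *
+ * __fsp_fillmsg() places the class byte in word0 (the sequence number
+ * is added later by fsp_queue_msg()) and sub/mod in word1, while bit
+ * 0x1000000 of cmd_sub_mod marks the message as expecting a response.
+ */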
+
+/*
+ * IMPORTANT NOTE: This is *guaranteed* to not call the completion
+ * routine recursively for *any* fsp message, either the
+ * queued one or a previous one. Thus it is *ok* to call
+ * this function with a lock held which will itself be
+ * taken by the completion function.
+ *
+ * Any change to this implementation must respect this
+ * rule. This will be especially true of things like
+ * reset/reload and error handling, if we fail to queue
+ * we must just return an error, not call any completion
+ * from the scope of fsp_queue_msg().
+ */
+int fsp_queue_msg(struct fsp_msg *msg, void (*comp)(struct fsp_msg *msg))
+{
+ struct fsp_cmdclass *cmdclass;
+ struct fsp *fsp = fsp_get_active();
+ bool need_unlock;
+ u16 seq;
+ int rc = 0;
+
+ if (!fsp)
+ return -1;
+
+ /* Recursive locking */
+ need_unlock = lock_recursive(&fsp_lock);
+
+ /* Grab a new sequence number */
+ seq = fsp_curseq;
+ fsp_curseq = fsp_curseq + 1;
+ if (fsp_curseq == 0)
+ fsp_curseq = 0x8000;
+ msg->word0 = (msg->word0 & 0xffff) | seq << 16;
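+ /*
+ * Note: the sequence number lives in the top 16 bits of word0;
+ * the wrap above keeps it in the 0x8000..0xffff range once it
+ * overflows
+ */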
+
+ /* Set completion */
+ msg->complete = comp;
+
+ /* Clear response state */
+ if (msg->resp)
+ msg->resp->state = fsp_msg_unused;
+
+ /* Queue the message in the appropriate queue */
+ cmdclass = fsp_get_cmdclass(msg);
+ if (!cmdclass) {
+ prerror("FSP: Invalid msg in fsp_queue_msg w0/1=0x%08x/%08x\n",
+ msg->word0, msg->word1);
+ rc = -1;
+ goto unlock;
+ }
+
+ msg->state = fsp_msg_queued;
+
+ /*
+ * If we have initiated or are about to initiate a reset/reload
+ * operation, we stash the message on the R&R backup queue. Otherwise,
+ * queue it normally and poke the HW
+ */
+ if (fsp_in_hir(fsp) || fsp_in_reset(fsp))
+ list_add_tail(&cmdclass->rr_queue, &msg->link);
+ else {
+ list_add_tail(&cmdclass->msgq, &msg->link);
+ fsp_poke_queue(cmdclass);
+ }
+
+ unlock:
+ if (need_unlock)
+ unlock(&fsp_lock);
+
+ return rc;
+}
+
+/* WARNING: This will drop the FSP lock !!! */
+static void fsp_complete_msg(struct fsp_msg *msg)
+{
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+ void (*comp)(struct fsp_msg *msg);
+
+ assert(cmdclass);
+
+ DBG(" completing msg, word0: 0x%08x\n", msg->word0);
+
+ comp = msg->complete;
+ list_del_from(&cmdclass->msgq, &msg->link);
+ cmdclass->busy = false;
+ msg->state = fsp_msg_done;
+
+ unlock(&fsp_lock);
+ if (comp)
+ (*comp)(msg);
+ lock(&fsp_lock);
+}
+
+/* WARNING: This will drop the FSP lock !!! */
+static void fsp_complete_send(struct fsp *fsp)
+{
+ struct fsp_msg *msg = fsp->pending;
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+
+ assert(msg);
+ assert(cmdclass);
+
+ fsp->pending = NULL;
+
+ DBG(" completing send, word0: 0x%08x, resp: %d\n",
+ msg->word0, msg->response);
+
+ if (msg->response) {
+ u64 setbit = fsp_get_class_bit(msg->word0 & 0xff);
+ msg->state = fsp_msg_wresp;
+ fsp_cmdclass_resp_bitmask |= setbit;
+ cmdclass->timesent = mftb();
+ } else
+ fsp_complete_msg(msg);
+}
+
+static void fsp_alloc_inbound(struct fsp_msg *msg)
+{
+ u16 func_id = msg->data.words[0] & 0xffff;
+ u32 len = msg->data.words[1];
+ u32 tce_token = 0, act_len = 0;
+ u8 rc = 0;
+ void *buf;
+
+ printf("FSP: Allocate inbound buffer func: %04x len: %d\n",
+ func_id, len);
+
+ lock(&fsp_lock);
+ if ((fsp_inbound_off + len) > FSP_INBOUND_SIZE) {
+ prerror("FSP: Out of space in buffer area !\n");
+ rc = 0xeb;
+ goto reply;
+ }
+
+ if (!fsp_inbound_buf) {
+ fsp_inbound_buf = memalign(TCE_PSIZE, FSP_INBOUND_SIZE);
+ if (!fsp_inbound_buf) {
+ prerror("FSP: could not allocate fsp_inbound_buf!\n");
+ rc = 0xeb;
+ goto reply;
+ }
+ }
+
+ buf = fsp_inbound_buf + fsp_inbound_off;
+ tce_token = PSI_DMA_INBOUND_BUF + fsp_inbound_off;
+ len = (len + 0xfff) & ~0xfff;
+ fsp_inbound_off += len;
+ fsp_tce_map(tce_token, buf, len);
+ printf("FSP: -> buffer at 0x%p, TCE: 0x%08x, alen: 0x%x\n",
+ buf, tce_token, len);
+ act_len = len;
+
+ reply:
+ unlock(&fsp_lock);
+ fsp_queue_msg(fsp_mkmsg(FSP_RSP_ALLOC_INBOUND | rc,
+ 3, 0, tce_token, act_len), fsp_freemsg);
+}
+
+void *fsp_inbound_buf_from_tce(u32 tce_token)
+{
+ u32 offset = tce_token - PSI_DMA_INBOUND_BUF;
+
+ if (tce_token < PSI_DMA_INBOUND_BUF || offset >= fsp_inbound_off) {
+ prerror("FSP: TCE token 0x%x out of bounds\n", tce_token);
+ return NULL;
+ }
+ return fsp_inbound_buf + offset;
+}
+
+static void fsp_repost_queued_msgs_post_rr(void)
+{
+ struct fsp_msg *msg;
+ int i;
+
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+ bool poke = false;
+
+ while(!list_empty(&cmdclass->rr_queue)) {
+ msg = list_pop(&cmdclass->rr_queue,
+ struct fsp_msg, link);
+ list_add_tail(&cmdclass->msgq, &msg->link);
+ poke = true;
+ }
+ if (poke)
+ fsp_poke_queue(cmdclass);
+ }
+}
+
+static bool fsp_local_command(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u32 cmd = 0;
+ u32 rsp_data = 0;
+
+ switch(cmd_sub_mod) {
+ case FSP_CMD_CONTINUE_IPL:
+ /* We get a CONTINUE_IPL as a response to OPL */
+ printf("FSP: Got CONTINUE_IPL !\n");
+ ipl_state |= ipl_got_continue;
+ return true;
+
+ case FSP_CMD_HV_STATE_CHG:
+ printf("FSP: Got HV state change request to %d\n",
+ msg->data.bytes[0]);
+
+ /* Send response synchronously for now, we might want to
+ * deal with that sort of stuff asynchronously if/when
+ * we add support for auto-freeing of messages
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_HV_STATE_CHG, 0), true);
+ return true;
+
+ case FSP_CMD_SP_NEW_ROLE:
+ /* FSP is assuming a new role */
+ printf("FSP: FSP assuming new role\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_SP_NEW_ROLE, 0), true);
+ ipl_state |= ipl_got_new_role;
+ return true;
+
+ case FSP_CMD_SP_QUERY_CAPS:
+ printf("FSP: FSP query capabilities\n");
+ /* XXX Do something saner. For now do a synchronous
+ * response and hard code our capabilities
+ */
+ fsp_sync_msg(fsp_mkmsg(FSP_RSP_SP_QUERY_CAPS, 4,
+ 0x3ff80000, 0, 0, 0), true);
+ ipl_state |= ipl_got_caps;
+ return true;
+ case FSP_CMD_FSP_FUNCTNAL:
+ printf("FSP: Got FSP Functional\n");
+ ipl_state |= ipl_got_fsp_functional;
+ return true;
+ case FSP_CMD_ALLOC_INBOUND:
+ fsp_alloc_inbound(msg);
+ return true;
+ case FSP_CMD_SP_RELOAD_COMP:
+ printf("FSP: SP says Reset/Reload complete\n");
+ if (msg->data.bytes[3] & PPC_BIT8(0)) {
+ fsp_fips_dump_notify(msg->data.words[1],
+ msg->data.words[2]);
+
+ if (msg->data.bytes[3] & PPC_BIT8(1))
+ printf(" PLID is %x\n",
+ msg->data.words[3]);
+ }
+ if (msg->data.bytes[3] & PPC_BIT8(2))
+ printf(" A Reset/Reload was NOT done\n");
+ else {
+ /* Notify clients that the FSP is back up */
+ fsp_notify_rr_state(FSP_RELOAD_COMPLETE);
+ fsp_repost_queued_msgs_post_rr();
+ }
+ return true;
+ case FSP_CMD_PANELSTATUS:
+ case FSP_CMD_PANELSTATUS_EX1:
+ case FSP_CMD_PANELSTATUS_EX2:
+ /* Panel status messages. We currently just ignore them */
+ return true;
+ case FSP_CMD_CLOSE_HMC_INTF:
+ /* Close the HMC interface */
+ /* Though Sapphire does not support an HMC connection, the FSP
+ * sends this message when it is trying to open any new
+ * hypervisor session. So we return error 0x51.
+ */
+ cmd = FSP_RSP_CLOSE_HMC_INTF | FSP_STAUS_INVALID_HMC_ID;
+ rsp_data = msg->data.bytes[0] << 24 | msg->data.bytes[1] << 16;
+ rsp_data &= 0xffff0000;
+ fsp_queue_msg(fsp_mkmsg(cmd, 1, rsp_data), fsp_freemsg);
+ return true;
+ }
+ return false;
+}
+
+
+/* This is called without the FSP lock */
+static void fsp_handle_command(struct fsp_msg *msg)
+{
+ struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg);
+ struct fsp_client *client, *next;
+ u32 cmd_sub_mod;
+
+ if (!cmdclass) {
+ prerror("FSP: Got message for unknown class %x\n",
+ msg->word0 & 0xff);
+ goto free;
+ }
+
+ cmd_sub_mod = (msg->word0 & 0xff) << 16;
+ cmd_sub_mod |= (msg->word1 & 0xff) << 8;
+ cmd_sub_mod |= (msg->word1 >> 8) & 0xff;
+
+ /* Some commands are handled locally */
+ if (fsp_local_command(cmd_sub_mod, msg))
+ goto free;
+
+ /* The rest go to clients */
+ list_for_each_safe(&cmdclass->clientq, client, next, link) {
+ if (client->message(cmd_sub_mod, msg))
+ goto free;
+ }
+
+ prerror("FSP: Unhandled message %06x\n", cmd_sub_mod);
+
+ /* We don't know whether the message expected some kind of
+ * response, so we send one anyway
+ */
+ fsp_queue_msg(fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008020, 0),
+ fsp_freemsg);
+ free:
+ fsp_freemsg(msg);
+}
+
+static void __fsp_fill_incoming(struct fsp *fsp, struct fsp_msg *msg,
+ int dlen, u32 w0, u32 w1)
+{
+ unsigned int wlen, i, reg;
+
+ msg->dlen = dlen - 8;
+ msg->word0 = w0;
+ msg->word1 = w1;
+ wlen = (dlen + 3) >> 2;
+ reg = FSP_MBX1_FDATA_AREA + 8;
+ for (i = 0; i < wlen; i++) {
+ msg->data.words[i] = fsp_rreg(fsp, reg);
+ reg += 4;
+ }
+
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+
+ fsp_trace_msg(msg, TRACE_FSP_MSG_IN);
+}
+
+static void __fsp_drop_incoming(struct fsp *fsp)
+{
+ /* Ack it (XDN) and clear HPEND & counts */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG,
+ FSP_MBX_CTL_PTS |
+ FSP_MBX_CTL_XDN |
+ FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK |
+ FSP_MBX_CTL_DCSP_MASK);
+}
+
+/* WARNING: This will drop the FSP lock */
+static void fsp_handle_incoming(struct fsp *fsp)
+{
+ struct fsp_msg *msg;
+ u32 h0, w0, w1;
+ unsigned int dlen;
+ bool special_response = false;
+
+ h0 = fsp_rreg(fsp, FSP_MBX1_FHDR0_REG);
+ dlen = (h0 >> 16) & 0xff;
+
+ w0 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA);
+ w1 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA + 4);
+
+ DBG(" Incoming: w0: 0x%08x, w1: 0x%08x, dlen: %d\n",
+ w0, w1, dlen);
+
+ /* Some responses are expected out of band */
+ if ((w0 & 0xff) == FSP_MCLASS_HMC_INTFMSG &&
+ ((w1 & 0xff) == 0x8a || ((w1 & 0xff) == 0x8b)))
+ special_response = true;
+
+ /* Check for response bit */
+ if (w1 & 0x80 && !special_response) {
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(w0 & 0xff);
+ struct fsp_msg *req;
+
+ if (!cmdclass) {
+ prerror("FSP: Got response for unknown class %x\n",
+ w0 & 0xff);
+ __fsp_drop_incoming(fsp);
+ return;
+ }
+
+ if (!cmdclass->busy || list_empty(&cmdclass->msgq)) {
+ prerror("FSP #%d: Got orphan response !\n", fsp->index);
+ __fsp_drop_incoming(fsp);
+ return;
+ }
+ req = list_top(&cmdclass->msgq, struct fsp_msg, link);
+
+ /* Check if the response seems to match the message */
+ if (req->state != fsp_msg_wresp ||
+ (req->word0 & 0xff) != (w0 & 0xff) ||
+ (req->word1 & 0xff) != (w1 & 0x7f)) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Response doesn't match pending msg\n",
+ fsp->index);
+ return;
+ } else {
+ u64 resetbit = ~fsp_get_class_bit(req->word0 & 0xff);
+ fsp_cmdclass_resp_bitmask &= resetbit;
+ cmdclass->timesent = 0;
+ }
+
+ /* Allocate response if needed. XXX Do we need to complete
+ * the original message with some kind of error here ?
+ */
+ if (!req->resp) {
+ req->resp = __fsp_allocmsg();
+ if (!req->resp) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Failed to allocate response\n",
+ fsp->index);
+ return;
+ }
+ }
+
+ /* Populate and complete (will drop the lock) */
+ req->resp->state = fsp_msg_response;
+ __fsp_fill_incoming(fsp, req->resp, dlen, w0, w1);
+ fsp_complete_msg(req);
+ return;
+ }
+
+ /* Allocate an incoming message */
+ msg = __fsp_allocmsg();
+ if (!msg) {
+ __fsp_drop_incoming(fsp);
+ prerror("FSP #%d: Failed to allocate incoming msg\n",
+ fsp->index);
+ return;
+ }
+ msg->state = fsp_msg_incoming;
+ __fsp_fill_incoming(fsp, msg, dlen, w0, w1);
+
+ /* Handle FSP commands. This can recurse into fsp_queue_msg etc.. */
+ unlock(&fsp_lock);
+ fsp_handle_command(msg);
+ lock(&fsp_lock);
+}
+
+static void fsp_check_queues(struct fsp *fsp)
+{
+ int i;
+
+ /* XXX In the long run, we might want to have a queue of
+ * classes waiting to be serviced to speed this up, either
+ * that or a bitmap.
+ */
+ for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i];
+
+ if (fsp->state != fsp_mbx_idle)
+ break;
+ if (cmdclass->busy || list_empty(&cmdclass->msgq))
+ continue;
+ fsp_poke_queue(cmdclass);
+ }
+}
+
+static void __fsp_poll(bool interrupt)
+{
+ struct fsp_iopath *iop;
+ struct fsp *fsp = fsp_get_active();
+ u32 ctl, hdir = 0;
+ bool psi_irq;
+
+ /*
+ * The tracer isn't terribly efficient at detecting dups
+ * especially when coming from multiple CPUs so we do our
+ * own change-detection locally
+ */
+ static u32 hdir_last_trace;
+ static u32 ctl_last_trace;
+ static bool psi_irq_last_trace;
+ static bool irq_last_trace;
+
+ if (!fsp)
+ return;
+
+ /* Crazy interrupt handling scheme:
+ *
+ * In order to avoid "losing" interrupts when polling the mbox
+ * we only clear interrupt conditions when called as a result of
+ * an interrupt.
+ *
+ * That way, if a poll clears, for example, the HPEND condition,
+ * the interrupt remains, causing a dummy interrupt later on
+ * thus allowing the OS to be notified of a state change (ie it
+ * doesn't need every poll site to monitor every state change).
+ *
+ * However, this scheme is complicated by the fact that we need
+ * to clear the interrupt condition after we have cleared the
+ * original condition in HCTL, and we might have long stale
+ * interrupts which we do need to eventually get rid of. But
+ * clearing interrupts that way is racy, so we need to loop
+ * and re-poll HCTL after having done so or we might miss an
+ * event. It's a latency risk, but unlikely and probably worth it.
+ */
+
+ again:
+ if (fsp->active_iopath < 0) {
+ /* That should never happen */
+ if (interrupt)
+ prerror("FSP: Interrupt with no working IO path\n");
+ return;
+ }
+ iop = &fsp->iopath[fsp->active_iopath];
+
+ /* Handle host initiated resets */
+ if (fsp_in_hir(fsp)) {
+ fsp_hir_poll(fsp, iop->psi);
+ return;
+ }
+
+ /* Check for error state and handle R&R completion */
+ fsp_handle_errors(fsp);
+
+ /*
+ * The above might have triggered an R&R, check that we
+ * are still functional
+ */
+ if ((fsp->active_iopath < 0) || fsp_in_hir(fsp))
+ return;
+ iop = &fsp->iopath[fsp->active_iopath];
+
+ /* Read interrupt status (we may or may not use it) */
+ hdir = fsp_rreg(fsp, FSP_HDIR_REG);
+
+ /* Read control now as well so we can trace them */
+ ctl = fsp_rreg(fsp, FSP_MBX1_HCTL_REG);
+
+ /* Ditto with PSI irq state */
+ psi_irq = psi_poll_fsp_interrupt(iop->psi);
+
+ /* Trace it if anything changes */
+ if (hdir != hdir_last_trace || ctl != ctl_last_trace ||
+ interrupt != irq_last_trace || psi_irq != psi_irq_last_trace) {
+ fsp_trace_event(fsp, TRACE_FSP_EVT_POLL_IRQ,
+ interrupt, hdir, ctl, psi_irq);
+
+ hdir_last_trace = hdir;
+ ctl_last_trace = ctl;
+ irq_last_trace = interrupt;
+ psi_irq_last_trace = psi_irq;
+ }
+
+ /*
+ * We *MUST* ignore the MBOX2 bits here. While MBOX2 cannot generate
+ * interrupt, it might still latch some bits here (and we found cases
+ * where the MBOX2 XUP would be set). If that happens, clearing HDIR
+ * never works (the bit gets set again immediately) because we don't
+ * clear the condition in HTCL2 and thus we loop forever.
+ */
+ hdir &= FSP_DBIRQ_MBOX1;
+
+ /*
+ * Sanity check: If an interrupt is pending and we are in polling
+ * mode, check that the PSI side is also pending. If some bit is
+ * set, just clear and move on.
+ */
+ if (hdir && !interrupt && !psi_irq) {
+ prerror("FSP: WARNING ! HDIR 0x%08x but no PSI irq !\n", hdir);
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ }
+
+ /*
+ * We should never have the mbox in error state here unless it
+ * was fine until some printf inside fsp_handle_errors() caused
+ * the console to poke the FSP which detected a brand new error
+ * in the process. Let's be safe rather than sorry and handle that
+ * here
+ */
+ if (fsp_in_hir(fsp) || fsp->state == fsp_mbx_err) {
+ prerror("FSP: Late error state detection\n");
+ goto again;
+ }
+
+ /*
+ * If we are in an R&R state with an active IO path, we
+ * shouldn't be getting interrupts. If we do, just clear
+ * the condition and print a message
+ */
+ if (fsp->state == fsp_mbx_rr) {
+ if (interrupt) {
+ prerror("FSP: Interrupt in RR state [HDIR=0x%08x]\n",
+ hdir);
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ }
+ return;
+ }
+
+ /* Poll FSP CTL */
+ if (ctl & (FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND))
+ DBG("FSP #%d: poll, ctl: %x\n", fsp->index, ctl);
+
+ /* Do we have a pending message waiting to complete ? */
+ if (ctl & FSP_MBX_CTL_XUP) {
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP);
+ if (fsp->state == fsp_mbx_send) {
+ /* mbox is free */
+ fsp->state = fsp_mbx_idle;
+
+ /* Complete message (will break the lock) */
+ fsp_complete_send(fsp);
+
+ /* Lock can have been broken, so ctl is now
+ * potentially invalid, let's recheck
+ */
+ goto again;
+ } else {
+ prerror("FSP #%d: Got XUP with no pending message !\n",
+ fsp->index);
+ }
+ }
+
+ if (fsp->state == fsp_mbx_send) {
+ /* XXX Handle send timeouts!!! */
+ }
+
+ /* Is there an incoming message ? This will break the lock as well */
+ if (ctl & FSP_MBX_CTL_HPEND)
+ fsp_handle_incoming(fsp);
+
+ /* Note: Lock may have been broken above, thus ctl might be invalid
+ * now, don't use it any further.
+ */
+
+ /* Check for something else to send */
+ if (fsp->state == fsp_mbx_idle)
+ fsp_check_queues(fsp);
+
+ /* Clear interrupts, and recheck HCTL if any occurred */
+ if (interrupt && hdir) {
+ fsp_wreg(fsp, FSP_HDIR_REG, hdir);
+ goto again;
+ }
+}
+
+void fsp_poll(void)
+{
+ lock(&fsp_lock);
+ __fsp_poll(false);
+ unlock(&fsp_lock);
+}
+
+void fsp_interrupt(void)
+{
+ lock(&fsp_lock);
+ __fsp_poll(true);
+ unlock(&fsp_lock);
+}
+
+int fsp_sync_msg(struct fsp_msg *msg, bool autofree)
+{
+ int rc;
+
+ /* This indication is useful only in the case where
+ * we queue up messages when the FSP takes a r/r.
+ */
+ msg->sync_msg = true;
+ msg->auto_free = autofree;
+
+ rc = fsp_queue_msg(msg, NULL);
+ if (rc)
+ goto bail;
+
+ while(fsp_msg_busy(msg))
+ fsp_poll();
+
+ switch(msg->state) {
+ case fsp_msg_done:
+ rc = 0;
+ break;
+ case fsp_msg_timeout:
+ rc = -1; /* XXX to improve */
+ break;
+ default:
+ rc = -1; /* Should not happen... (assert ?) */
+ }
+
+ if (msg->resp)
+ rc = (msg->resp->word1 >> 8) & 0xff;
+ bail:
+ if (autofree)
+ fsp_freemsg(msg);
+ return rc;
+}
+
+void fsp_register_client(struct fsp_client *client, u8 msgclass)
+{
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass);
+
+ if (!fsp_present())
+ return;
+ assert(cmdclass);
+ list_add_tail(&cmdclass->clientq, &client->link);
+}
+
+void fsp_unregister_client(struct fsp_client *client, u8 msgclass)
+{
+ struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass);
+
+ if (!fsp_present())
+ return;
+ assert(cmdclass);
+ list_del_from(&cmdclass->clientq, &client->link);
+}
+
+static int fsp_init_mbox(struct fsp *fsp)
+{
+ unsigned int i;
+ u32 reg;
+
+ /*
+ * Note: The documentation contradicts itself as to
+ * whether the HDIM bits should be set or cleared to
+ * enable interrupts
+ *
+ * This seems to work...
+ */
+
+ /* Mask all interrupts */
+ fsp_wreg(fsp, FSP_HDIM_CLR_REG, FSP_DBIRQ_ALL);
+
+ /* Clear all errors */
+ fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1 | FSP_DBERRSTAT_CLR2);
+
+ /* Initialize data area as the doco says */
+ for (i = 0; i < 0x40; i += 4)
+ fsp_wreg(fsp, FSP_MBX1_HDATA_AREA + i, 0);
+
+ /*
+ * Clear whatever crap may remain in HDCR. Do not write XDN as that
+ * would be interpreted incorrectly as an R&R completion which
+ * we aren't ready to send yet !
+ */
+ fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND |
+ FSP_MBX_CTL_HCSP_MASK | FSP_MBX_CTL_DCSP_MASK |
+ FSP_MBX_CTL_PTS);
+
+ /* Clear all pending interrupts */
+ fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL);
+
+ /* Enable all mbox1 interrupts */
+ fsp_wreg(fsp, FSP_HDIM_SET_REG, FSP_DBIRQ_MBOX1);
+
+ /* Decode what FSP we are connected to */
+ reg = fsp_rreg(fsp, FSP_SCRATCH0_REG);
+ if (reg & PPC_BIT32(0)) { /* Is it a valid connection */
+ if (reg & PPC_BIT32(3))
+ printf("FSP: Connected to FSP-B\n");
+ else
+ printf("FSP: Connected to FSP-A\n");
+ }
+
+ return 0;
+}
+
+/* We use a single fixed TCE table for all PSI interfaces */
+static void fsp_init_tce_table(void)
+{
+ fsp_tce_table = (u64 *)PSI_TCE_TABLE_BASE;
+
+ /* Memset the larger table even if we only use the smaller
+ * one on P7
+ */
+ memset(fsp_tce_table, 0, PSI_TCE_TABLE_SIZE_P8);
+}
+
+void fsp_tce_map(u32 offset, void *addr, u32 size)
+{
+ u64 raddr = (u64)addr;
+
+ assert(!(offset & 0xfff));
+ assert(!(raddr & 0xfff));
+ assert(!(size & 0xfff));
+
+ size >>= 12;
+ offset >>= 12;
+
+ while(size--) {
+ fsp_tce_table[offset++] = raddr | 0x3;
+ raddr += 0x1000;
+ }
+}
+
+void fsp_tce_unmap(u32 offset, u32 size)
+{
+ assert(!(offset & 0xfff));
+ assert(!(size & 0xfff));
+
+ size >>= 12;
+ offset >>= 12;
+
+ while(size--)
+ fsp_tce_table[offset++] = 0;
+}
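+
+/*
+ * Illustrative usage of the two helpers above: map a page-aligned
+ * buffer at a fixed offset in the TCE window before handing that
+ * offset to the FSP as a "TCE token", then unmap it when the FSP is
+ * done with it:
+ *
+ *     fsp_tce_map(PSI_DMA_FETCH, buf, 0x1000);
+ *     ... pass PSI_DMA_FETCH to the FSP ...
+ *     fsp_tce_unmap(PSI_DMA_FETCH, 0x1000);
+ *
+ * Offset, address and size must all be 4k aligned, as the asserts
+ * enforce.
+ */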
+
+static struct fsp *fsp_find_by_index(int index)
+{
+ struct fsp *fsp;
+
+ /* The FSP list is NULL terminated (see fsp_create_fsp), walk
+ * it until we find a matching index
+ */
+ for (fsp = first_fsp; fsp; fsp = fsp->link)
+ if (fsp->index == index)
+ return fsp;
+
+ return NULL;
+}
+
+static void fsp_init_links(struct dt_node *fsp_node)
+{
+ const struct dt_property *linksprop;
+ int i, index;
+ struct fsp *fsp;
+ struct fsp_iopath *fiop;
+
+ linksprop = dt_find_property(fsp_node, "ibm,psi-links");
+ index = dt_prop_get_u32(fsp_node, "reg");
+ fsp = fsp_find_by_index(index);
+ if (!fsp) {
+ prerror("FSP: FSP with index %d not found\n", index);
+ return;
+ }
+
+ fsp->state = fsp_mbx_idle;
+
+ /* Iterate all links */
+ for (i = 0; i < fsp->iopath_count; i++) {
+ u64 reg;
+ u32 link;
+
+ link = ((const u32 *)linksprop->prop)[i];
+ fiop = &fsp->iopath[i];
+ fiop->psi = psi_find_link(link);
+ if (fiop->psi == NULL) {
+ prerror("FSP #%d: Couldn't find PSI link\n",
+ fsp->index);
+ continue;
+ }
+
+ printf("FSP #%d: Found PSI HB link to chip %d\n",
+ fsp->index, link);
+
+ psi_fsp_link_in_use(fiop->psi);
+
+ /* Get the FSP register window */
+ reg = in_be64(fiop->psi->regs + PSIHB_FSPBAR);
+ fiop->fsp_regs = (void *)(reg | (1ULL << 63) |
+ dt_prop_get_u32(fsp_node, "reg-offset"));
+ }
+}
+
+static void fsp_update_links_states(struct fsp *fsp)
+{
+ struct fsp_iopath *fiop;
+ unsigned int i;
+
+ /* Iterate all links */
+ for (i = 0; i < fsp->iopath_count; i++) {
+ fiop = &fsp->iopath[i];
+ if (!fiop->psi)
+ continue;
+ if (!fiop->psi->working)
+ fiop->state = fsp_path_bad;
+ else if (fiop->psi->active) {
+ fsp->active_iopath = i;
+ fiop->state = fsp_path_active;
+ } else
+ fiop->state = fsp_path_backup;
+ }
+
+ if (fsp->active_iopath >= 0) {
+ if (!active_fsp || (active_fsp != fsp))
+ active_fsp = fsp;
+
+ fsp_inbound_off = 0;
+ fiop = &fsp->iopath[fsp->active_iopath];
+ psi_init_for_fsp(fiop->psi);
+ fsp_init_mbox(fsp);
+ psi_enable_fsp_interrupt(fiop->psi);
+ }
+}
+
+void fsp_reinit_fsp(void)
+{
+ struct fsp *fsp;
+
+ /* Stop polling PSI */
+ psi_set_link_polling(false);
+
+ /* Notify all FSPs to check for an updated link state */
+ for (fsp = first_fsp; fsp; fsp = fsp->link)
+ fsp_update_links_states(fsp);
+}
+
+static void fsp_create_fsp(struct dt_node *fsp_node)
+{
+ const struct dt_property *linksprop;
+ struct fsp *fsp;
+ int count, index;
+
+ index = dt_prop_get_u32(fsp_node, "reg");
+ prerror("FSP #%d: Found in device-tree, setting up...\n", index);
+
+ linksprop = dt_find_property(fsp_node, "ibm,psi-links");
+ if (!linksprop || linksprop->len < 4) {
+ prerror("FSP #%d: No links !\n", index);
+ return;
+ }
+
+ fsp = zalloc(sizeof(struct fsp));
+ if (!fsp) {
+ prerror("FSP #%d: Can't allocate memory !\n", index);
+ return;
+ }
+
+ fsp->index = index;
+ fsp->active_iopath = -1;
+
+ count = linksprop->len / 4;
+ printf("FSP #%d: Found %d IO PATH\n", index, count);
+ if (count > FSP_MAX_IOPATH) {
+ prerror("FSP #%d: WARNING, limited to %d IO PATH\n",
+ index, FSP_MAX_IOPATH);
+ count = FSP_MAX_IOPATH;
+ }
+ fsp->iopath_count = count;
+
+ fsp->link = first_fsp;
+ first_fsp = fsp;
+
+ fsp_init_links(fsp_node);
+ fsp_update_links_states(fsp);
+}
+
+static void fsp_opal_poll(void *data __unused)
+{
+ if (try_lock(&fsp_lock)) {
+ __fsp_poll(false);
+ unlock(&fsp_lock);
+ }
+}
+
+static bool fsp_init_one(const char *compat)
+{
+ struct dt_node *fsp_node;
+ bool inited = false;
+
+ dt_for_each_compatible(dt_root, fsp_node, compat) {
+ if (!inited) {
+ int i;
+
+ /* Initialize the per-class msg queues */
+ for (i = 0;
+ i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) {
+ list_head_init(&fsp_cmdclass[i].msgq);
+ list_head_init(&fsp_cmdclass[i].clientq);
+ list_head_init(&fsp_cmdclass[i].rr_queue);
+ }
+
+ /* Init the queues for RR notifier cmdclass */
+ list_head_init(&fsp_cmdclass_rr.msgq);
+ list_head_init(&fsp_cmdclass_rr.clientq);
+ list_head_init(&fsp_cmdclass_rr.rr_queue);
+
+ /* Register poller */
+ opal_add_poller(fsp_opal_poll, NULL);
+
+ inited = true;
+ }
+
+ /* Create the FSP data structure */
+ fsp_create_fsp(fsp_node);
+ }
+
+ return inited;
+}
+
+void fsp_init(void)
+{
+ printf("FSP: Looking for FSP...\n");
+
+ fsp_init_tce_table();
+
+ if (!fsp_init_one("ibm,fsp1") && !fsp_init_one("ibm,fsp2")) {
+ printf("FSP: No FSP on this machine\n");
+ return;
+ }
+}
+
+bool fsp_present(void)
+{
+ return first_fsp != NULL;
+}
+
+static void fsp_timeout_poll(void *data __unused)
+{
+ u64 now = mftb();
+ u64 timeout_val = 0;
+ u64 cmdclass_resp_bitmask = fsp_cmdclass_resp_bitmask;
+ struct fsp_cmdclass *cmdclass = NULL;
+ struct fsp_msg *req = NULL;
+ u32 index = 0;
+
+ if (timeout_timer == 0)
+ timeout_timer = now + secs_to_tb(30);
+
+ /* The lowest granularity for a message timeout is 30 secs.
+ * So every 30secs, check if there is any message
+ * waiting for a response from the FSP
+ */
+ if ((tb_compare(now, timeout_timer) == TB_AAFTERB) ||
+ (tb_compare(now, timeout_timer) == TB_AEQUALB))
+ timeout_timer = now + secs_to_tb(30);
+ else
+ return;
+
+ while (cmdclass_resp_bitmask) {
+ u64 time_sent = 0;
+ u64 time_to_comp = 0;
+
+ if (!(cmdclass_resp_bitmask & 0x1))
+ goto next_bit;
+
+ cmdclass = &fsp_cmdclass[index];
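+ /* cmdclass->timeout is expressed in minutes */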
+ timeout_val = secs_to_tb((cmdclass->timeout) * 60);
+ time_sent = cmdclass->timesent;
+ time_to_comp = now - cmdclass->timesent;
+
+ /* Now check if the response has timed out */
+ if (tb_compare(time_to_comp, timeout_val) == TB_AAFTERB) {
+ u64 resetbit = 0;
+
+ /* Take the FSP lock now and re-check */
+ lock(&fsp_lock);
+ if (!(fsp_cmdclass_resp_bitmask & (1ull << index)) ||
+ time_sent != cmdclass->timesent) {
+ unlock(&fsp_lock);
+ goto next_bit;
+ }
+ req = list_top(&cmdclass->msgq, struct fsp_msg, link);
+ log_simple_error(&e_info(OPAL_RC_FSP_POLL_TIMEOUT),
+ "FSP: Response from FSP timed out, word0 = %x,"
+ "word1 = %x state: %d\n",
+ req->word0, req->word1, req->state);
+ fsp_reg_dump();
+ resetbit = ~fsp_get_class_bit(req->word0 & 0xff);
+ fsp_cmdclass_resp_bitmask &= resetbit;
+ cmdclass->timesent = 0;
+ if (req->resp)
+ req->resp->state = fsp_msg_timeout;
+ fsp_complete_msg(req);
+ __fsp_trigger_reset();
+ unlock(&fsp_lock);
+ }
+ next_bit:
+ cmdclass_resp_bitmask = cmdclass_resp_bitmask >> 1;
+ index++;
+ }
+}
+
+void fsp_opl(void)
+{
+ struct dt_node *iplp;
+
+ if (!fsp_present())
+ return;
+
+ /* Send OPL */
+ ipl_state |= ipl_opl_sent;
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_OPL, 0), true);
+ while(!(ipl_state & ipl_got_continue))
+ fsp_poll();
+
+ /* Send continue ACK */
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_CONTINUE_ACK, 0), true);
+
+ /* Wait for various FSP messages */
+ printf("INIT: Waiting for FSP to advertize new role...\n");
+ while(!(ipl_state & ipl_got_new_role))
+ fsp_poll();
+ printf("INIT: Waiting for FSP to request capabilities...\n");
+ while(!(ipl_state & ipl_got_caps))
+ fsp_poll();
+
+ /* Initiate the timeout poller */
+ opal_add_poller(fsp_timeout_poll, NULL);
+
+ /* Tell FSP we are in standby */
+ printf("INIT: Sending HV Functional: Standby...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x01000000), true);
+
+ /* Wait for FSP functional */
+ printf("INIT: Waiting for FSP functional\n");
+ while(!(ipl_state & ipl_got_fsp_functional))
+ fsp_poll();
+
+ /* Tell FSP we are in running state */
+ printf("INIT: Sending HV Functional: Runtime...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x02000000), true);
+
+ /*
+ * For the factory reset case, FSP sends us the PCI Bus
+ * Reset request. We don't have to do anything special with
+ * PCI bus numbers here; just send the Power Down message
+ * with modifier 0x02 to FSP.
+ */
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp && dt_find_property(iplp, "pci-busno-reset-ipl")) {
+ printf("INIT: PCI Bus Reset requested. Sending Power Down\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_POWERDOWN_PCIRS, 0), true);
+ }
+
+ /*
+ * Tell FSP we are in running state with all partitions.
+ *
+ * This is needed, otherwise the FSP will not reset its reboot count
+ * on failures. Ideally we should send that when we know the
+ * OS is up, but we don't currently have a very good way to do
+ * that, so this will do as a stop-gap.
+ */
+ printf("INIT: Sending HV Functional: Runtime all parts...\n");
+ fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x04000000), true);
+}
+
+uint32_t fsp_adjust_lid_side(uint32_t lid_no)
+{
+ struct dt_node *iplp;
+ const char *side = NULL;
+
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp)
+ side = dt_prop_get_def(iplp, "cec-ipl-side", NULL);
+ if (!side || !strcmp(side, "temp"))
+ lid_no |= ADJUST_T_SIDE_LID_NO;
+ return lid_no;
+}
+
+int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length)
+{
+ uint32_t total, remaining = *length;
+ uint64_t baddr;
+ uint64_t balign, boff, bsize;
+ struct fsp_msg *msg;
+ static struct lock fsp_fetch_lock = LOCK_UNLOCKED;
+
+ *length = total = 0;
+
+ if (!fsp_present())
+ return -ENODEV;
+
+ printf("FSP: Fetch data id: %02x sid: %08x to %p (0x%x bytes)\n",
+ id, sub_id, buffer, remaining);
+
+ /*
+ * Use a lock to avoid multiple processors trying to fetch
+ * at the same time and colliding on the TCE space
+ */
+ lock(&fsp_fetch_lock);
+
+ while(remaining) {
+ uint32_t chunk, taddr, woffset, wlen;
+ uint8_t rc;
+
+ /* Calculate alignment skew */
+ baddr = (uint64_t)buffer;
+ balign = baddr & ~0xffful;
+ boff = baddr & 0xffful;
+
+ /* Get a chunk */
+ chunk = remaining;
+ if (chunk > (PSI_DMA_FETCH_SIZE - boff))
+ chunk = PSI_DMA_FETCH_SIZE - boff;
+ bsize = ((boff + chunk) + 0xfff) & ~0xffful;
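+ /* e.g. (illustrative): for a buffer address ending in 0x234,
+ * boff = 0x234, and bsize rounds boff + chunk up to whole 4k
+ * pages so the TCE mapping below covers the unaligned head and
+ * tail of the transfer
+ */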
+
+ printf("FSP: 0x%08x bytes balign=%llx boff=%llx bsize=%llx\n",
+ chunk, balign, boff, bsize);
+ fsp_tce_map(PSI_DMA_FETCH, (void *)balign, bsize);
+ taddr = PSI_DMA_FETCH + boff;
+ msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 6,
+ flags << 16 | id, sub_id, offset,
+ 0, taddr, chunk);
+ rc = fsp_sync_msg(msg, false);
+ fsp_tce_unmap(PSI_DMA_FETCH, bsize);
+
+ woffset = msg->resp->data.words[1];
+ wlen = msg->resp->data.words[2];
+ printf("FSP: -> rc=0x%02x off: %08x twritten: %08x\n",
+ rc, woffset, wlen);
+ fsp_freemsg(msg);
+
+ /* XXX Is flash busy (0x3f) a reason for retry ? */
+ if (rc != 0 && rc != 2) {
+ unlock(&fsp_fetch_lock);
+ return -EIO;
+ }
+
+ remaining -= wlen;
+ total += wlen;
+ buffer += wlen;
+ offset += wlen;
+
+ /* The doc seems to indicate that we get rc=2 if there's
+ * more data and rc=0 if we reached the end of file, but
+ * it looks like I always get rc=0, so let's consider it
+ * an EOF if we got less than what we asked for
+ */
+ if (wlen < chunk)
+ break;
+ }
+ unlock(&fsp_fetch_lock);
+
+ *length = total;
+
+ return 0;
+}
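+
+/*
+ * Illustrative call (the dataset id shown is an assumption, the real
+ * values live in fsp.h): fetch up to buf_len bytes of a LID,
+ *
+ *     size_t len = buf_len;
+ *     rc = fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid_no, 0,
+ *                         buf, &len);
+ *
+ * On return, len holds the number of bytes actually written.
+ */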
+
+/*
+ * Asynchronous fsp fetch data call
+ *
+ * Note:
+ * buffer = PSI DMA address space
+ */
+int fsp_fetch_data_queue(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length,
+ void (*comp)(struct fsp_msg *msg))
+{
+ struct fsp_msg *msg;
+ uint32_t chunk = *length;
+
+ if (!comp)
+ return OPAL_PARAMETER;
+
+ msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 0x6, flags << 16 | id,
+ sub_id, offset, 0, buffer, chunk);
+ if (!msg) {
+ prerror("FSP: allocation failed!\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ if (fsp_queue_msg(msg, comp)) {
+ fsp_freemsg(msg);
+ prerror("FSP: Failed to queue fetch data message\n");
+ return OPAL_INTERNAL_ERROR;
+ }
+ return OPAL_SUCCESS;
+}
+
+void fsp_used_by_console(void)
+{
+ fsp_lock.in_con_path = true;
+}
diff --git a/hw/gx.c b/hw/gx.c
new file mode 100644
index 0000000..31de7b5
--- /dev/null
+++ b/hw/gx.c
@@ -0,0 +1,158 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <gx.h>
+#include <xscom.h>
+
+/*
+ * Note: This file is only used on P7/P7+
+ */
+
+/* Configuration of the PSI BUID, see the explanation in
+ * interrupts.h
+ */
+static int gx_p7_configure_psi_buid(uint32_t chip, uint32_t buid)
+{
+ uint64_t mode1;
+ int rc;
+
+ rc = xscom_read(chip, GX_P7_MODE1_REG, &mode1);
+ if (rc) {
+ prerror("GX: XSCOM error %d reading GX MODE1 REG\n", rc);
+ return rc;
+ }
+
+ mode1 = SETFIELD(GX_P7_MODE1_PSI_BUID, mode1, buid);
+ mode1 &= ~GX_P7_MODE1_PSI_BUID_DISABLE;
+
+ printf("GX: MODE1_REG set to 0x%llx\n", mode1);
+ rc = xscom_write(chip, GX_P7_MODE1_REG, mode1);
+ if (rc) {
+ prerror("GX: XSCOM error %d writing GX MODE1 REG\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gx_p7p_configure_psi_buid(uint32_t chip, uint32_t buid)
+{
+ uint64_t mode4;
+ int rc;
+
+ rc = xscom_read(chip, GX_P7P_MODE4_REG, &mode4);
+ if (rc) {
+ prerror("GX: XSCOM error %d reading GX MODE1 REG\n", rc);
+ return rc;
+ }
+
+ mode4 = SETFIELD(GX_P7P_MODE4_PSI_BUID, mode4, buid);
+ mode4 &= ~GX_P7P_MODE4_PSI_BUID_DISABLE;
+
+ rc = xscom_write(chip, GX_P7P_MODE4_REG, mode4);
+ if (rc) {
+ prerror("GX: XSCOM error %d writing GX MODE1 REG\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Configure the BUID of the PSI interrupt in the GX
+ * controller.
+ *
+ * @chip: Chip number (0..31)
+ * @buid: 9-bit BUID value
+ */
+int gx_configure_psi_buid(uint32_t chip, uint32_t buid)
+{
+ uint32_t pvr = mfspr(SPR_PVR);
+
+ printf("GX: PSI BUID for PVR %x (type %x) chip %d BUID 0x%x\n",
+ pvr, PVR_TYPE(pvr), chip, buid);
+
+ switch(PVR_TYPE(pvr)) {
+ case PVR_TYPE_P7:
+ return gx_p7_configure_psi_buid(chip, buid);
+ case PVR_TYPE_P7P:
+ return gx_p7p_configure_psi_buid(chip, buid);
+ }
+ return -1;
+}
+
+
+static int gx_p7_configure_tce_bar(uint32_t chip, uint32_t gx, uint64_t addr,
+ uint64_t size)
+{
+ uint32_t areg, mreg;
+ int rc;
+
+ switch (gx) {
+ case 0:
+ areg = GX_P7_GX0_TCE_BAR;
+ mreg = GX_P7_GX0_TCE_MASK;
+ break;
+ case 1:
+ areg = GX_P7_GX1_TCE_BAR;
+ mreg = GX_P7_GX1_TCE_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (addr) {
+ uint64_t taddr, tmask;
+
+ /* The address field contains bits 18 to 43 of the address */
+ taddr = SETFIELD(GX_P7_TCE_BAR_ADDR, 0ul,
+ (addr >> GX_P7_TCE_BAR_ADDR_SHIFT));
+ taddr |= GX_P7_TCE_BAR_ENABLE;
+ tmask = SETFIELD(GX_P7_TCE_MASK, 0ul,
+ ~((size - 1) >> GX_P7_TCE_BAR_ADDR_SHIFT));
+ rc = xscom_write(chip, areg, 0);
+ rc |= xscom_write(chip, mreg, tmask);
+ rc |= xscom_write(chip, areg, taddr);
+ } else {
+ rc = xscom_write(chip, areg, 0);
+ }
+ return rc ? -EIO : 0;
+}
+
+/* Configure the TCE BAR of a given GX bus
+ *
+ * @chip: Chip number (0..31)
+ * @gx : GX bus index
+ * @addr: base address of TCE table
+ * @size: size of TCE table
+ */
+int gx_configure_tce_bar(uint32_t chip, uint32_t gx, uint64_t addr,
+ uint64_t size)
+{
+ uint32_t pvr = mfspr(SPR_PVR);
+
+ printf("GX: TCE BAR for PVR %x (type %x) chip %d gx %d\n",
+ pvr, PVR_TYPE(pvr), chip, gx);
+
+ /* We only support P7... is there a P7+ with P5IOC2 ? */
+ switch(PVR_TYPE(pvr)) {
+ case PVR_TYPE_P7:
+ return gx_p7_configure_tce_bar(chip, gx, addr, size);
+ }
+ return -EINVAL;
+}
+
+
diff --git a/hw/homer.c b/hw/homer.c
new file mode 100644
index 0000000..14cfa1e
--- /dev/null
+++ b/hw/homer.c
@@ -0,0 +1,143 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <cpu.h>
+#include <chip.h>
+#include <mem_region.h>
+#include <hostservices.h>
+
+#define PBA_BAR0 0x2013f00
+#define PBA_BARMASK0 0x2013f04
+
+static bool read_pba_bar(struct proc_chip *chip, unsigned int bar_no,
+ uint64_t *base, uint64_t *size)
+{
+ uint64_t bar, mask;
+ int rc;
+
+ rc = xscom_read(chip->id, PBA_BAR0 + bar_no, &bar);
+ if (rc) {
+ prerror("SLW: Error %d reading PBA BAR%d on chip %d\n",
+ rc, bar_no, chip->id);
+ return false;
+ }
+ rc = xscom_read(chip->id, PBA_BARMASK0 + bar_no, &mask);
+ if (rc) {
+ prerror("SLW: Error %d reading PBA BAR MASK%d on chip %d\n",
+ rc, bar_no, chip->id);
+ return false;
+ }
+ printf(" PBA BAR%d : 0x%016llx\n", bar_no, bar);
+ printf(" PBA MASK%d: 0x%016llx\n", bar_no, mask);
+
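+ /* The BAR register holds the base address; the mask covers the
+ * size bits at 1MB granularity, so the window size is
+ * (mask | 0xfffff) + 1
+ */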
+ *base = bar & 0x0ffffffffffffffful;
+ *size = (mask | 0xfffff) + 1;
+
+ return (*base) != 0;
+}
+
+static void homer_init_chip(struct proc_chip *chip)
+{
+ uint64_t hbase = 0, hsize = 0;
+ uint64_t sbase, ssize, obase, osize;
+
+ /*
+ * PBA BARs assigned by HB:
+ *
+ * 0 : Entire HOMER
+ * 1 : OCC to Centaur path (we don't care)
+ * 2 : SLW image
+ * 3 : OCC Common area
+ *
+ * We need to reserve the memory covered by BAR 0 and BAR 3, however
+ * on earlier HBs, BAR0 isn't set, so in that case we need BAR 2
+ * instead to cover the SLW (OCC not running).
+ */
+ if (read_pba_bar(chip, 0, &hbase, &hsize)) {
+ printf(" HOMER Image at 0x%llx size %lldMB\n",
+ hbase, hsize / 0x100000);
+ mem_reserve("ibm,homer-image", hbase, hsize);
+
+ chip->homer_base = hbase;
+ chip->homer_size = hsize;
+ }
+
+ /*
+ * We always read the SLW BAR since we need to grab info about the
+ * SLW image in the struct proc_chip for use by the slw.c code
+ */
+ if (read_pba_bar(chip, 2, &sbase, &ssize)) {
+ printf(" SLW Image at 0x%llx size %lldMB\n",
+ sbase, ssize / 0x100000);
+
+ /*
+ * Only reserve it if we have no homer image or if it
+ * doesn't fit in it (only check the base).
+ */
+ if (sbase < hbase || sbase > (hbase + hsize) ||
+ (hbase == 0 && sbase > 0))
+ mem_reserve("ibm,slw-image", sbase, ssize);
+
+ chip->slw_base = sbase;
+ chip->slw_bar_size = ssize;
+ chip->slw_image_size = ssize; /* will be adjusted later */
+ }
+
+ if (read_pba_bar(chip, 3, &obase, &osize)) {
+ printf(" OCC Common Area at 0x%llx size %lldMB\n",
+ obase, osize / 0x100000);
+ chip->occ_common_base = obase;
+ chip->occ_common_size = osize;
+ }
+}
+
+void homer_init(void)
+{
+ struct proc_chip *chip;
+
+ if (proc_gen != proc_gen_p8)
+ return;
+
+ /*
+ * XXX This is temporary, on P8 we look for any configured
+ * SLW/OCC BAR and reserve the memory. Eventually, this will be
+ * done via HostBoot using the device-tree "reserved-ranges"
+ * or we'll load the SLW & OCC images ourselves using Host Services.
+ */
+ for_each_chip(chip) {
+ printf("HOMER: Init chip %d\n", chip->id);
+ homer_init_chip(chip);
+ }
+
+ /*
+ * Check if the PBA BARs are already loaded with HOMER and
+ * skip host services.
+ */
+
+ chip = next_chip(NULL);
+ if (chip->homer_base && chip->occ_common_base) {
+ /* Reserve OCC common area from BAR */
+ mem_reserve("ibm,occ-common-area", chip->occ_common_base,
+ chip->occ_common_size);
+ } else {
+ /* Allocate memory for HOMER and OCC common area */
+ host_services_occ_base_setup();
+ }
+}
+
diff --git a/hw/lpc-uart.c b/hw/lpc-uart.c
new file mode 100644
index 0000000..f6037e1
--- /dev/null
+++ b/hw/lpc-uart.c
@@ -0,0 +1,343 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <lpc.h>
+#include <console.h>
+#include <opal.h>
+#include <device.h>
+#include <interrupts.h>
+#include <processor.h>
+#include <fsp-elog.h>
+#include <trace.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_UART_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_UART,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+/* UART reg defs */
+#define REG_RBR 0
+#define REG_THR 0
+#define REG_DLL 0
+#define REG_IER 1
+#define REG_DLM 1
+#define REG_FCR 2
+#define REG_IIR 2
+#define REG_LCR 3
+#define REG_MCR 4
+#define REG_LSR 5
+#define REG_MSR 6
+#define REG_SCR 7
+
+#define LSR_DR 0x01 /* Data ready */
+#define LSR_OE 0x02 /* Overrun */
+#define LSR_PE 0x04 /* Parity error */
+#define LSR_FE 0x08 /* Framing error */
+#define LSR_BI 0x10 /* Break */
+#define LSR_THRE 0x20 /* Xmit holding register empty */
+#define LSR_TEMT 0x40 /* Xmitter empty */
+#define LSR_ERR 0x80 /* Error */
+
+#define LCR_DLAB 0x80 /* DLL access */
+
+static uint32_t uart_base;
+static bool has_irq, irq_disabled;
+
+/*
+ * We implement a simple buffer to buffer input data as some bugs in
+ * Linux make it fail to read fast enough after we get an interrupt.
+ *
+ * We use it on non-interrupt operations as well while at it because
+ * it doesn't cost us much and might help in a few cases where Linux
+ * is calling opal_poll_events() but not actually reading.
+ *
+ * Most of the time I expect we'll flush it completely to Linux into
+ * its tty flip buffers, so I don't bother with a ring buffer.
+ */
+#define IN_BUF_SIZE 0x1000
+static uint8_t *in_buf;
+static uint32_t in_count;
+
+static void uart_trace(u8 ctx, u8 cnt, u8 irq_state, u8 in_count)
+{
+ union trace t;
+
+ t.uart.ctx = ctx;
+ t.uart.cnt = cnt;
+ t.uart.irq_state = irq_state;
+ t.uart.in_count = in_count;
+ trace_add(&t, TRACE_UART, sizeof(struct trace_uart));
+}
+
+static inline uint8_t uart_read(unsigned int reg)
+{
+ return lpc_inb(uart_base + reg);
+}
+
+static inline void uart_write(unsigned int reg, uint8_t val)
+{
+ lpc_outb(val, uart_base + reg);
+}
+
+static size_t uart_con_write(const char *buf, size_t len)
+{
+ size_t written = 0;
+
+ while(written < len) {
+ while ((uart_read(REG_LSR) & LSR_THRE) == 0) {
+ int i = 0;
+
+ /* Give the simulator some breathing space */
+ for (; i < 1000; ++i)
+ smt_very_low();
+ }
+ smt_medium();
+ uart_write(REG_THR, buf[written++]);
+ }
+
+ return written;
+}
+
+/* Must be called with console lock held */
+static void uart_read_to_buffer(void)
+{
+ /* As long as there is room in the buffer */
+ while(in_count < IN_BUF_SIZE) {
+ /* Read status register */
+ uint8_t lsr = uart_read(REG_LSR);
+
+ /* Nothing to read ... */
+ if ((lsr & LSR_DR) == 0)
+ break;
+
+ /* Read and add to buffer */
+ in_buf[in_count++] = uart_read(REG_RBR);
+ }
+
+ if (!has_irq)
+ return;
+
+ /* If the buffer is full disable the interrupt */
+ if (in_count == IN_BUF_SIZE) {
+ if (!irq_disabled)
+ uart_write(REG_IER, 0x00);
+ irq_disabled = true;
+ } else {
+ /* Otherwise, enable it */
+ if (irq_disabled)
+ uart_write(REG_IER, 0x01);
+ irq_disabled = false;
+ }
+}
+
+/* This is called with the console lock held */
+static size_t uart_con_read(char *buf, size_t len)
+{
+ size_t read_cnt = 0;
+ uint8_t lsr = 0;
+
+ if (!in_buf)
+ return 0;
+
+ /* Read from buffer first */
+ if (in_count) {
+ read_cnt = in_count;
+ if (len < read_cnt)
+ read_cnt = len;
+ memcpy(buf, in_buf, read_cnt);
+ len -= read_cnt;
+ if (in_count != read_cnt)
+ memmove(in_buf, in_buf + read_cnt, in_count - read_cnt);
+ in_count -= read_cnt;
+ }
+
+ /*
+ * If there's still room in the user buffer, read from the UART
+ * directly
+ */
+ while(len) {
+ lsr = uart_read(REG_LSR);
+ if ((lsr & LSR_DR) == 0)
+ break;
+ buf[read_cnt++] = uart_read(REG_RBR);
+ len--;
+ }
+
+ /* Finally, flush whatever's left in the UART into our buffer */
+ uart_read_to_buffer();
+
+ /* Adjust the OPAL event */
+ if (in_count)
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT,
+ OPAL_EVENT_CONSOLE_INPUT);
+ else
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0);
+
+ uart_trace(TRACE_UART_CTX_READ, read_cnt, irq_disabled, in_count);
+
+ return read_cnt;
+}
+
+static struct con_ops uart_con_driver = {
+ .read = uart_con_read,
+ .write = uart_con_write
+};
+
+bool uart_console_poll(void)
+{
+ if (!in_buf)
+ return false;
+
+ /* Grab what's in the UART and stash it into our buffer */
+ uart_read_to_buffer();
+
+ uart_trace(TRACE_UART_CTX_POLL, 0, irq_disabled, in_count);
+
+ return !!in_count;
+}
+
+void uart_irq(void)
+{
+ if (!in_buf)
+ return;
+
+ /* This needs locking vs read() */
+ lock(&con_lock);
+
+ /* Grab what's in the UART and stash it into our buffer */
+ uart_read_to_buffer();
+
+ /* Set the event if the buffer has anything in it */
+ if (in_count)
+ opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT,
+ OPAL_EVENT_CONSOLE_INPUT);
+
+ uart_trace(TRACE_UART_CTX_IRQ, 0, irq_disabled, in_count);
+ unlock(&con_lock);
+}
+
+static bool uart_init_hw(unsigned int speed, unsigned int clock)
+{
+ unsigned int dll = (clock / 16) / speed;
+
+ /* Clear line control */
+ uart_write(REG_LCR, 0x00);
+
+ /* Check if the UART responds */
+ uart_write(REG_IER, 0x01);
+ if (uart_read(REG_IER) != 0x01)
+ goto detect_fail;
+ uart_write(REG_IER, 0x00);
+ if (uart_read(REG_IER) != 0x00)
+ goto detect_fail;
+
+ uart_write(REG_LCR, LCR_DLAB);
+ uart_write(REG_DLL, dll & 0xff);
+ uart_write(REG_DLM, dll >> 8);
+ uart_write(REG_LCR, 0x03); /* 8N1 */
+ uart_write(REG_MCR, 0x03); /* RTS/DTR */
+ uart_write(REG_FCR, 0x07); /* clear & en. fifos */
+ return true;
+
+ detect_fail:
+ prerror("UART: Presence detect failed !\n");
+ return false;
+}
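+
+/*
+ * Illustrative divisor calculation for uart_init_hw() above: with the
+ * common 1.8432MHz UART clock and a current-speed of 9600 baud,
+ * dll = (1843200 / 16) / 9600 = 12.
+ */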
+
+void uart_init(bool enable_interrupt)
+{
+ const struct dt_property *prop;
+ struct dt_node *n;
+ char *path __unused;
+ uint32_t irqchip, irq;
+
+ if (!lpc_present())
+ return;
+
+ /* We support only one */
+ n = dt_find_compatible_node(dt_root, NULL, "ns16550");
+ if (!n)
+ return;
+
+ /* Get IO base */
+ prop = dt_find_property(n, "reg");
+ if (!prop) {
+ log_simple_error(&e_info(OPAL_RC_UART_INIT),
+ "UART: Can't find reg property\n");
+ return;
+ }
+ if (dt_property_get_cell(prop, 0) != OPAL_LPC_IO) {
+ log_simple_error(&e_info(OPAL_RC_UART_INIT),
+ "UART: Only supports IO addresses\n");
+ return;
+ }
+ uart_base = dt_property_get_cell(prop, 1);
+
+ if (!uart_init_hw(dt_prop_get_u32(n, "current-speed"),
+ dt_prop_get_u32(n, "clock-frequency"))) {
+ prerror("UART: Initialization failed\n");
+ dt_add_property_strings(n, "status", "bad");
+ return;
+ }
+
+ /*
+ * Mark LPC used by the console (will mark the relevant
+ * locks to avoid deadlocks when flushing the console)
+ */
+ lpc_used_by_console();
+
+ /* Install console backend for printf() */
+ set_console(&uart_con_driver);
+
+ /* Setup the interrupts properties since HB couldn't do it */
+ irqchip = dt_prop_get_u32(n, "ibm,irq-chip-id");
+ irq = get_psi_interrupt(irqchip) + P8_IRQ_PSI_HOST_ERR;
+ printf("UART: IRQ connected to chip %d, irq# is 0x%x\n", irqchip, irq);
+ if (enable_interrupt) {
+ dt_add_property_cells(n, "interrupts", irq);
+ dt_add_property_cells(n, "interrupt-parent", get_ics_phandle());
+ }
+
+ if (dummy_console_enabled()) {
+ /*
+ * If the dummy console is enabled, we mark the UART as
+ * reserved since we don't want the kernel to start using it
+ * with its own 8250 driver
+ */
+ dt_add_property_strings(n, "status", "reserved");
+
+ /*
+ * If the interrupt is enabled, turn on RX interrupts (and
+ * only those for now)
+ */
+ if (enable_interrupt) {
+ uart_write(REG_IER, 0x01);
+ has_irq = true;
+ irq_disabled = false;
+ }
+
+ /* Allocate an input buffer */
+ in_buf = zalloc(IN_BUF_SIZE);
+ printf("UART: Enabled as OS console\n");
+ } else {
+ /* Else, we expose it as our chosen console */
+ dt_add_property_strings(n, "status", "ok");
+ path = dt_get_path(n);
+ dt_add_property_string(dt_chosen, "linux,stdout-path", path);
+ free(path);
+ printf("UART: Enabled as OS pass-through\n");
+ }
+}
diff --git a/hw/lpc.c b/hw/lpc.c
new file mode 100644
index 0000000..8dc533d
--- /dev/null
+++ b/hw/lpc.c
@@ -0,0 +1,500 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <lock.h>
+#include <chip.h>
+#include <lpc.h>
+#include <timebase.h>
+#include <fsp-elog.h>
+
+DEFINE_LOG_ENTRY(OPAL_RC_LPC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_LPC_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_LPC,
+ OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+#define ECCB_CTL 0 /* b0020 -> b00200 */
+#define ECCB_STAT 2 /* b0022 -> b00210 */
+#define ECCB_DATA 3 /* b0023 -> b00218 */
+
+#define ECCB_CTL_MAGIC 0xd000000000000000ul
+#define ECCB_CTL_DATASZ_MASK PPC_BITMASK(4,7)
+#define ECCB_CTL_DATASZ_LSH PPC_BITLSHIFT(7)
+#define ECCB_CTL_READ PPC_BIT(15)
+#define ECCB_CTL_ADDRLEN_MASK PPC_BITMASK(23,25)
+#define ECCB_CTL_ADDRLEN_LSH PPC_BITLSHIFT(25)
+#define ECCB_ADDRLEN_4B 0x4
+#define ECCB_CTL_ADDR_MASK PPC_BITMASK(32,63)
+#define ECCB_CTL_ADDR_LSH 0
+
+#define ECCB_STAT_PIB_ERR_MASK PPC_BITMASK(0,5)
+#define ECCB_STAT_PIB_ERR_LSH PPC_BITLSHIFT(5)
+#define ECCB_STAT_RD_DATA_MASK PPC_BITMASK(6,37)
+#define ECCB_STAT_RD_DATA_LSH PPC_BITLSHIFT(37)
+#define ECCB_STAT_BUSY PPC_BIT(44)
+#define ECCB_STAT_ERRORS1_MASK PPC_BITMASK(45,51)
+#define ECCB_STAT_ERRORS1_LSH PPC_BITLSHIFT(51)
+#define ECCB_STAT_OP_DONE PPC_BIT(52)
+#define ECCB_STAT_ERRORS2_MASK PPC_BITMASK(53,55)
+#define ECCB_STAT_ERRORS2_LSH PPC_BITLSHIFT(55)
+
+#define ECCB_STAT_ERR_MASK (ECCB_STAT_PIB_ERR_MASK | \
+ ECCB_STAT_ERRORS1_MASK | \
+ ECCB_STAT_ERRORS2_MASK)
+
+#define ECCB_TIMEOUT 1000000
+
+/* LPC HC registers */
+#define LPC_HC_FW_SEG_IDSEL 0x24
+#define LPC_HC_FW_RD_ACC_SIZE 0x28
+#define LPC_HC_FW_RD_1B 0x00000000
+#define LPC_HC_FW_RD_2B 0x01000000
+#define LPC_HC_FW_RD_4B 0x02000000
+#define LPC_HC_FW_RD_16B 0x04000000
+#define LPC_HC_FW_RD_128B 0x07000000
+
+/* Default LPC bus */
+static int32_t lpc_default_chip_id = -1;
+
+/*
+ * These are expected to be the same on all chips and should probably
+ * be read (or configured) dynamically. This is how things are configured
+ * today on Tuletta.
+ */
+static uint32_t lpc_io_opb_base = 0xd0010000;
+static uint32_t lpc_mem_opb_base = 0xe0000000;
+static uint32_t lpc_fw_opb_base = 0xf0000000;
+static uint32_t lpc_reg_opb_base = 0xc0012000;
+
+static int64_t opb_write(struct proc_chip *chip, uint32_t addr, uint32_t data,
+ uint32_t sz)
+{
+ uint64_t ctl = ECCB_CTL_MAGIC, stat;
+ int64_t rc, tout;
+ uint64_t data_reg;
+
+ switch(sz) {
+ case 1:
+ data_reg = ((uint64_t)data) << 56;
+ break;
+ case 2:
+ data_reg = ((uint64_t)data) << 48;
+ break;
+ case 4:
+ data_reg = ((uint64_t)data) << 32;
+ break;
+ default:
+ prerror("LPC: Invalid data size %d\n", sz);
+ return OPAL_PARAMETER;
+ }
+
+ rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_DATA, data_reg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
+ "LPC: XSCOM write to ECCB DATA error %lld\n", rc);
+ return rc;
+ }
+
+ ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz);
+ ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B);
+ ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr);
+ rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_CTL, ctl);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
+ "LPC: XSCOM write to ECCB CTL error %lld\n", rc);
+ return rc;
+ }
+
+ for (tout = 0; tout < ECCB_TIMEOUT; tout++) {
+ rc = xscom_read(chip->id, chip->lpc_xbase + ECCB_STAT, &stat);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
+ "LPC: XSCOM read from ECCB STAT err %lld\n",
+ rc);
+ return rc;
+ }
+ if (stat & ECCB_STAT_OP_DONE) {
+ if (stat & ECCB_STAT_ERR_MASK) {
+ log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
+ "LPC: Error status: 0x%llx\n", stat);
+ return OPAL_HARDWARE;
+ }
+ return OPAL_SUCCESS;
+ }
+ time_wait(100);
+ }
+ log_simple_error(&e_info(OPAL_RC_LPC_WRITE), "LPC: Write timeout !\n");
+ return OPAL_HARDWARE;
+}
+
+static int64_t opb_read(struct proc_chip *chip, uint32_t addr, uint32_t *data,
+ uint32_t sz)
+{
+ uint64_t ctl = ECCB_CTL_MAGIC | ECCB_CTL_READ, stat;
+ int64_t rc, tout;
+
+ if (sz != 1 && sz != 2 && sz != 4) {
+ prerror("LPC: Invalid data size %d\n", sz);
+ return OPAL_PARAMETER;
+ }
+
+ ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz);
+ ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B);
+ ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr);
+ rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_CTL, ctl);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_LPC_READ),
+ "LPC: XSCOM write to ECCB CTL error %lld\n", rc);
+ return rc;
+ }
+
+ for (tout = 0; tout < ECCB_TIMEOUT; tout++) {
+ rc = xscom_read(chip->id, chip->lpc_xbase + ECCB_STAT, &stat);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_LPC_READ),
+ "LPC: XSCOM read from ECCB STAT err %lld\n",
+ rc);
+ return rc;
+ }
+ if (stat & ECCB_STAT_OP_DONE) {
+ uint32_t rdata = GETFIELD(ECCB_STAT_RD_DATA, stat);
+ if (stat & ECCB_STAT_ERR_MASK) {
+ log_simple_error(&e_info(OPAL_RC_LPC_READ),
+ "LPC: Error status: 0x%llx\n", stat);
+ return OPAL_HARDWARE;
+ }
+ switch(sz) {
+ case 1:
+ *data = rdata >> 24;
+ break;
+ case 2:
+ *data = rdata >> 16;
+ break;
+ default:
+ *data = rdata;
+ break;
+ }
+ return 0;
+ }
+ time_wait(100);
+ }
+ log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: Read timeout !\n");
+ return OPAL_HARDWARE;
+}
+
+static int64_t lpc_set_fw_idsel(struct proc_chip *chip, uint8_t idsel)
+{
+ uint32_t val;
+ int64_t rc;
+
+ if (idsel == chip->lpc_fw_idsel)
+ return OPAL_SUCCESS;
+ if (idsel > 0xf)
+ return OPAL_PARAMETER;
+
+ rc = opb_read(chip, lpc_reg_opb_base + LPC_HC_FW_SEG_IDSEL,
+ &val, 4);
+ if (rc) {
+ prerror("LPC: Failed to read HC_FW_SEG_IDSEL register !\n");
+ return rc;
+ }
+ val = (val & 0xfffffff0) | idsel;
+ rc = opb_write(chip, lpc_reg_opb_base + LPC_HC_FW_SEG_IDSEL,
+ val, 4);
+ if (rc) {
+ prerror("LPC: Failed to write HC_FW_SEG_IDSEL register !\n");
+ return rc;
+ }
+ chip->lpc_fw_idsel = idsel;
+ return OPAL_SUCCESS;
+}
+
+static int64_t lpc_set_fw_rdsz(struct proc_chip *chip, uint8_t rdsz)
+{
+ uint32_t val;
+ int64_t rc;
+
+ if (rdsz == chip->lpc_fw_rdsz)
+ return OPAL_SUCCESS;
+ switch(rdsz) {
+ case 1:
+ val = LPC_HC_FW_RD_1B;
+ break;
+ case 2:
+ val = LPC_HC_FW_RD_2B;
+ break;
+ case 4:
+ val = LPC_HC_FW_RD_4B;
+ break;
+ default:
+ /*
+ * The HW supports 16 and 128 via a buffer/cache
+		 * but I have never experimented with it and am not
+		 * sure it works the way we expect, so let's leave it
+ * at that for now
+ */
+ return OPAL_PARAMETER;
+ }
+ rc = opb_write(chip, lpc_reg_opb_base + LPC_HC_FW_RD_ACC_SIZE,
+ val, 4);
+ if (rc) {
+ prerror("LPC: Failed to write LPC_HC_FW_RD_ACC_SIZE !\n");
+ return rc;
+ }
+ chip->lpc_fw_rdsz = rdsz;
+ return OPAL_SUCCESS;
+}
+
+static int64_t lpc_opb_prepare(struct proc_chip *chip,
+ enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t sz,
+ uint32_t *opb_base, bool is_write)
+{
+ uint32_t top = addr + sz;
+ uint8_t fw_idsel;
+ int64_t rc;
+
+ /* Address wraparound */
+ if (top < addr)
+ return OPAL_PARAMETER;
+
+ /*
+ * Bound check access and get the OPB base address for
+ * the window corresponding to the access type
+ */
+ switch(addr_type) {
+ case OPAL_LPC_IO:
+ /* IO space is 64K */
+ if (top > 0x10000)
+ return OPAL_PARAMETER;
+ /* And only supports byte accesses */
+ if (sz != 1)
+ return OPAL_PARAMETER;
+ *opb_base = lpc_io_opb_base;
+ break;
+ case OPAL_LPC_MEM:
+ /* MEM space is 256M */
+ if (top > 0x10000000)
+ return OPAL_PARAMETER;
+ /* And only supports byte accesses */
+ if (sz != 1)
+ return OPAL_PARAMETER;
+ *opb_base = lpc_mem_opb_base;
+ break;
+ case OPAL_LPC_FW:
+ /*
+ * FW space is in segments of 256M controlled
+ * by IDSEL, make sure we don't cross segments
+ */
+ *opb_base = lpc_fw_opb_base;
+ fw_idsel = (addr >> 28);
+ if (((top - 1) >> 28) != fw_idsel)
+ return OPAL_PARAMETER;
+
+ /* Set segment */
+ rc = lpc_set_fw_idsel(chip, fw_idsel);
+ if (rc)
+ return rc;
+ /* Set read access size */
+ if (!is_write) {
+ rc = lpc_set_fw_rdsz(chip, sz);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+ return OPAL_SUCCESS;
+}
+
+static int64_t __lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t data, uint32_t sz)
+{
+ struct proc_chip *chip = get_chip(chip_id);
+ uint32_t opb_base;
+ int64_t rc;
+
+ if (!chip || !chip->lpc_xbase)
+ return OPAL_PARAMETER;
+
+ lock(&chip->lpc_lock);
+
+ /*
+ * Convert to an OPB access and handle LPC HC configuration
+ * for FW accesses (IDSEL)
+ */
+ rc = lpc_opb_prepare(chip, addr_type, addr, sz, &opb_base, true);
+ if (rc)
+ goto bail;
+
+ /* Perform OPB access */
+ rc = opb_write(chip, opb_base + addr, data, sz);
+
+ unlock(&chip->lpc_lock);
+
+ /* XXX Add LPC error handling/recovery */
+ bail:
+ return rc;
+}
+
+int64_t lpc_write(enum OpalLPCAddressType addr_type, uint32_t addr,
+ uint32_t data, uint32_t sz)
+{
+ if (lpc_default_chip_id < 0)
+ return OPAL_PARAMETER;
+ return __lpc_write(lpc_default_chip_id, addr_type, addr, data, sz);
+}
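+
+/*
+ * Hypothetical usage sketch (port and value are made up): write one
+ * byte to an LPC IO port on the default bus:
+ *
+ *	lpc_write(OPAL_LPC_IO, 0x3f8, 'A', 1);
+ */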
+
+/*
+ * The "OPAL" variant adds the emulation of 2 and 4 byte accesses using
+ * byte accesses for IO and MEM space in order to be compatible with
+ * existing Linux expectations
+ */
+static int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t data, uint32_t sz)
+{
+ int64_t rc;
+
+ if (addr_type == OPAL_LPC_FW || sz == 1)
+ return __lpc_write(chip_id, addr_type, addr, data, sz);
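+	/*
+	 * Split the access into single byte writes, least significant
+	 * byte first, at increasing addresses.
+	 */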
+ while(sz--) {
+ rc = __lpc_write(chip_id, addr_type, addr, data & 0xff, 1);
+ if (rc)
+ return rc;
+ addr++;
+ data >>= 8;
+ }
+ return OPAL_SUCCESS;
+}
+
+static int64_t __lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t *data, uint32_t sz)
+{
+ struct proc_chip *chip = get_chip(chip_id);
+ uint32_t opb_base;
+ int64_t rc;
+
+ if (!chip || !chip->lpc_xbase)
+ return OPAL_PARAMETER;
+
+ lock(&chip->lpc_lock);
+
+ /*
+ * Convert to an OPB access and handle LPC HC configuration
+ * for FW accesses (IDSEL and read size)
+ */
+ rc = lpc_opb_prepare(chip, addr_type, addr, sz, &opb_base, false);
+ if (rc)
+ goto bail;
+
+ /* Perform OPB access */
+ rc = opb_read(chip, opb_base + addr, data, sz);
+
+ unlock(&chip->lpc_lock);
+
+ /* XXX Add LPC error handling/recovery */
+ bail:
+ return rc;
+}
+
+int64_t lpc_read(enum OpalLPCAddressType addr_type, uint32_t addr,
+ uint32_t *data, uint32_t sz)
+{
+ if (lpc_default_chip_id < 0)
+ return OPAL_PARAMETER;
+ return __lpc_read(lpc_default_chip_id, addr_type, addr, data, sz);
+}
+
+/*
+ * The "OPAL" variant adds the emulation of 2 and 4 byte accesses using
+ * byte accesses for IO and MEM space in order to be compatible with
+ * existing Linux expectations
+ */
+static int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t *data, uint32_t sz)
+{
+ int64_t rc;
+
+ if (addr_type == OPAL_LPC_FW || sz == 1)
+ return __lpc_read(chip_id, addr_type, addr, data, sz);
+ *data = 0;
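+	/*
+	 * Assemble the result from single byte reads; the byte at the
+	 * lowest address lands in the most significant position.
+	 */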
+ while(sz--) {
+ uint32_t byte;
+
+ rc = __lpc_read(chip_id, addr_type, addr, &byte, 1);
+ if (rc)
+ return rc;
+ *data = *data | (byte << (8 * sz));
+ addr++;
+ }
+ return OPAL_SUCCESS;
+}
+
+bool lpc_present(void)
+{
+ return lpc_default_chip_id >= 0;
+}
+
+void lpc_init(void)
+{
+ struct dt_node *xn;
+ bool has_lpc = false;
+
+ dt_for_each_compatible(dt_root, xn, "ibm,power8-lpc") {
+ uint32_t gcid = dt_get_chip_id(xn);
+ struct proc_chip *chip;
+
+ chip = get_chip(gcid);
+ assert(chip);
+
+ chip->lpc_xbase = dt_get_address(xn, 0, NULL);
+ chip->lpc_fw_idsel = 0xff;
+ chip->lpc_fw_rdsz = 0xff;
+ init_lock(&chip->lpc_lock);
+
+ if (lpc_default_chip_id < 0 ||
+ dt_has_node_property(xn, "primary", NULL)) {
+ lpc_default_chip_id = chip->id;
+ }
+
+ printf("LPC: Bus on chip %d PCB_Addr=0x%x\n",
+ chip->id, chip->lpc_xbase);
+ has_lpc = true;
+ }
+ if (lpc_default_chip_id >= 0)
+ printf("LPC: Default bus on chip %d\n", lpc_default_chip_id);
+
+ if (has_lpc) {
+ opal_register(OPAL_LPC_WRITE, opal_lpc_write, 5);
+ opal_register(OPAL_LPC_READ, opal_lpc_read, 5);
+ }
+}
+
+void lpc_used_by_console(void)
+{
+ struct proc_chip *chip;
+
+ xscom_used_by_console();
+
+ for_each_chip(chip)
+ chip->lpc_lock.in_con_path = true;
+}
diff --git a/hw/nx.c b/hw/nx.c
new file mode 100644
index 0000000..8f42717
--- /dev/null
+++ b/hw/nx.c
@@ -0,0 +1,127 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <cpu.h>
+#include <nx.h>
+
+#define NX_P7_RNG_BAR XSCOM_SAT(0x1, 0x2, 0x0c)
+#define NX_P7_RNG_BAR_ADDR_MASK PPC_BITMASK(18, 51)
+#define NX_P7_RNG_BAR_ADDR_LSH PPC_BITLSHIFT(51)
+#define NX_P7_RNG_BAR_SIZE_MASK PPC_BITMASK(53, 55)
+#define NX_P7_RNG_BAR_SIZE_LSH PPC_BITLSHIFT(55)
+#define NX_P7_RNG_BAR_ENABLE PPC_BIT(52)
+
+#define NX_P8_RNG_BAR XSCOM_SAT(0xc, 0x2, 0x0d)
+#define NX_P8_RNG_BAR_ADDR_MASK PPC_BITMASK(14, 51)
+#define NX_P8_RNG_BAR_ADDR_LSH PPC_BITLSHIFT(51)
+#define NX_P8_RNG_BAR_SIZE_MASK PPC_BITMASK(53, 55)
+#define NX_P8_RNG_BAR_SIZE_LSH PPC_BITLSHIFT(55)
+#define NX_P8_RNG_BAR_ENABLE PPC_BIT(52)
+
+#define NX_P7_RNG_CFG XSCOM_SAT(0x1, 0x2, 0x12)
+#define NX_P7_RNG_CFG_ENABLE PPC_BIT(63)
+#define NX_P8_RNG_CFG XSCOM_SAT(0xc, 0x2, 0x12)
+#define NX_P8_RNG_CFG_ENABLE PPC_BIT(63)
+
+static void nx_create_node(struct dt_node *node)
+{
+ u64 bar, cfg;
+ u64 xbar, xcfg;
+ u32 pb_base;
+ u32 gcid;
+ u64 rng_addr, rng_len, len;
+ struct dt_node *rng;
+ int rc;
+
+ gcid = dt_get_chip_id(node);
+ pb_base = dt_get_address(node, 0, NULL);
+
+ if (dt_node_is_compatible(node, "ibm,power7-nx")) {
+ xbar = pb_base + NX_P7_RNG_BAR;
+ xcfg = pb_base + NX_P7_RNG_CFG;
+ } else if (dt_node_is_compatible(node, "ibm,power8-nx")) {
+ xbar = pb_base + NX_P8_RNG_BAR;
+ xcfg = pb_base + NX_P8_RNG_CFG;
+ } else {
+ prerror("NX%d: Unknown NX type!\n", gcid);
+ return;
+ }
+
+ rc = xscom_read(gcid, xbar, &bar); /* Get RNG BAR */
+ if (rc)
+ return; /* Hope xscom always prints error message */
+
+ rc = xscom_read(gcid, xcfg, &cfg); /* Get RNG CFG */
+ if (rc)
+ return;
+
+ /*
+ * We use the P8 BAR constants. The layout of the BAR is the
+ * same, with more bits at the top of P8 which are hard wired to
+ * 0 on P7. We also mask in-place rather than using GETFIELD
+ * for the base address as we happen to *know* that it's properly
+ * aligned in the register.
+ *
+	 * FIXME? Always assume the BAR gets a valid address from the FSP
+ */
+ rng_addr = bar & NX_P8_RNG_BAR_ADDR_MASK;
+ len = GETFIELD(NX_P8_RNG_BAR_SIZE, bar);
+ if (len > 4) {
+ prerror("NX%d: Corrupted bar size %lld\n", gcid, len);
+ return;
+ }
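+	/*
+	 * The 3-bit size field selects one of five window sizes; the
+	 * table below follows the BAR encoding, which is not monotonic.
+	 */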
+	rng_len = (u64[]){ 0x1000,	/* 4K */
+			   0x10000,	/* 64K */
+			   0x400000000,	/* 16G */
+			   0x100000,	/* 1M */
+			   0x1000000	/* 16M */ } [len];
+
+ printf("NX%d: RNG BAR set to 0x%016llx..0x%016llx\n",
+ gcid, rng_addr, rng_addr + rng_len - 1);
+
+ /* RNG must be enabled before MMIO is enabled */
+ rc = xscom_write(gcid, xcfg, cfg | NX_P8_RNG_CFG_ENABLE);
+ if (rc)
+ return;
+
+ /* The BAR needs to be enabled too */
+ rc = xscom_write(gcid, xbar, bar | NX_P8_RNG_BAR_ENABLE);
+ if (rc)
+ return;
+ rng = dt_new_addr(dt_root, "hwrng", rng_addr);
+ if (!rng)
+ return;
+
+ dt_add_property_strings(rng, "compatible", "ibm,power-rng");
+ dt_add_property_cells(rng, "reg", hi32(rng_addr), lo32(rng_addr),
+ hi32(rng_len), lo32(rng_len));
+ dt_add_property_cells(rng, "ibm,chip-id", gcid);
+}
+
+/* Create nodes for MMIO accessible components in NX (only RNG) */
+void nx_init(void)
+{
+ struct dt_node *node;
+
+ dt_for_each_compatible(dt_root, node, "ibm,power-nx")
+ nx_create_node(node);
+}
diff --git a/hw/occ.c b/hw/occ.c
new file mode 100644
index 0000000..7e6284a
--- /dev/null
+++ b/hw/occ.c
@@ -0,0 +1,477 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <cpu.h>
+#include <chip.h>
+#include <mem_region.h>
+#include <fsp.h>
+#include <timebase.h>
+#include <hostservices.h>
+#include <fsp-elog.h>
+
+/* OCC Communication Area for PStates */
+
+#define P8_HOMER_SAPPHIRE_DATA_OFFSET 0x1F8000
+
+#define MAX_PSTATES 256
+
+struct occ_pstate_entry {
+ s8 id;
+ u8 flags;
+ u8 vdd;
+ u8 vcs;
+ u32 freq_khz;
+};
+
+struct occ_pstate_table {
+ u8 valid;
+ u8 version;
+ u8 throttle;
+ s8 pstate_min;
+ s8 pstate_nom;
+ s8 pstate_max;
+ u8 spare1;
+ u8 spare2;
+ u64 reserved;
+ struct occ_pstate_entry pstates[MAX_PSTATES];
+};
+
+DEFINE_LOG_ENTRY(OPAL_RC_OCC_LOAD, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_OCC_RESET, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_OCC_PSTATE_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
+ OPAL_CEC_HARDWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+/* Check each chip's HOMER/Sapphire area for PState valid bit */
+static bool wait_for_all_occ_init(void)
+{
+ struct proc_chip *chip;
+ uint64_t occ_data_area;
+ struct occ_pstate_table *occ_data;
+ int tries;
+ uint64_t start_time, end_time;
+
+ start_time = mftb();
+ for_each_chip(chip) {
+ /* Check for valid homer address */
+ if (!chip->homer_base) {
+ printf("OCC: Chip: %x homer_base is not valid\n",
+ chip->id);
+ return false;
+ }
+ /* Get PState table address */
+ occ_data_area = chip->homer_base + P8_HOMER_SAPPHIRE_DATA_OFFSET;
+ occ_data = (struct occ_pstate_table *)occ_data_area;
+
+ /*
+		 * Checking for occ_data->valid == 1 is ok because we clear the
+		 * whole homer_base+size area before passing memory to host
+		 * services, which ensures occ_data->valid == 0 before OCC load.
+ */
+ tries = 20; /* 2 secs */
+ while((occ_data->valid != 1) && tries--) {
+ time_wait_ms(100);
+ }
+ if (occ_data->valid != 1) {
+ printf("OCC: Chip: %x PState table is not valid\n",
+ chip->id);
+ return false;
+ }
+ printf("OCC: Chip %02x Data (%016llx) = %016llx\n",
+ chip->id, occ_data_area,
+ *(uint64_t *)occ_data_area);
+ }
+ end_time = mftb();
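+	/* The timebase ticks at 512MHz: ticks / 512 / 1000 gives ms */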
+	printf("OCC: All chips ready after %lld ms\n", (end_time - start_time) / 512 / 1000);
+ return true;
+}
+
+/* Add device tree properties to describe pstate states */
+/* Return nominal pstate to set in each core */
+static bool add_cpu_pstate_properties(s8 *pstate_nom)
+{
+ struct proc_chip *chip;
+ uint64_t occ_data_area;
+ struct occ_pstate_table *occ_data;
+ struct dt_node *power_mgt;
+ u8 nr_pstates;
+ /* Arrays for device tree */
+ u32 dt_id[MAX_PSTATES];
+ u32 dt_freq[MAX_PSTATES];
+ int i;
+
+	printf("OCC: CPU pstate device tree init\n");
+
+ /* Find first chip and core */
+ chip = next_chip(NULL);
+
+ /* Extract PState information from OCC */
+
+ /* Dump state table */
+ occ_data_area = chip->homer_base + P8_HOMER_SAPPHIRE_DATA_OFFSET;
+
+ printf("OCC: Data (%16llx) = %16llx %16llx\n",
+ occ_data_area,
+ *(uint64_t *)occ_data_area,
+ *(uint64_t *)(occ_data_area+8));
+
+ occ_data = (struct occ_pstate_table *)occ_data_area;
+
+ if (!occ_data->valid) {
+ printf("OCC: PState table is not valid\n");
+ return false;
+ }
+
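+	/*
+	 * Pstates are signed 8-bit values, so e.g. (hypothetical numbers)
+	 * min = -45 and max = 0 would give 46 table entries.
+	 */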
+ nr_pstates = occ_data->pstate_max - occ_data->pstate_min + 1;
+ printf("OCC: Min %d Nom %d Max %d Nr States %d\n",
+ occ_data->pstate_min, occ_data->pstate_nom,
+ occ_data->pstate_max, nr_pstates);
+
+ if (nr_pstates <= 1 || nr_pstates > 128) {
+ printf("OCC: OCC range is not valid\n");
+ return false;
+ }
+
+ /* Setup arrays for device-tree */
+	for (i = 0; i < nr_pstates; i++) {
+ dt_id[i] = occ_data->pstates[i].id;
+ dt_freq[i] = occ_data->pstates[i].freq_khz/1000;
+ }
+
+ power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ printf("OCC: dt node /ibm,opal/power-mgt not found\n");
+ return false;
+ }
+
+ /* Add the device-tree entries */
+ dt_add_property(power_mgt, "ibm,pstate-ids", dt_id, nr_pstates * 4);
+ dt_add_property(power_mgt, "ibm,pstate-frequencies-mhz", dt_freq, nr_pstates * 4);
+ dt_add_property_cells(power_mgt, "ibm,pstate-min", occ_data->pstate_min);
+ dt_add_property_cells(power_mgt, "ibm,pstate-nominal", occ_data->pstate_nom);
+ dt_add_property_cells(power_mgt, "ibm,pstate-max", occ_data->pstate_max);
+
+ /* Return pstate to set for each core */
+ *pstate_nom = occ_data->pstate_nom;
+ return true;
+}
+
+/*
+ * Prepare chip for pstate transitions
+ */
+
+static bool cpu_pstates_prepare_core(struct proc_chip *chip, struct cpu_thread *c, s8 pstate_nom)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp, pstate;
+ int rc;
+
+ /*
+ * Currently Fastsleep init clears EX_PM_SPR_OVERRIDE_EN.
+ * Need to ensure only relevant bits are inited
+ */
+
+ /* Init PM GP1 for SCOM based PSTATE control to set nominal freq */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), &tmp);
+ tmp = tmp | EX_PM_SETUP_GP1_PM_SPR_OVERRIDE_EN;
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: Failed to write PM_GP1 in pstates init\n");
+ return false;
+ }
+
+ /* Set new pstate to core */
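+	/* The top 16 bits of PPMCR hold two 8-bit pstate fields; program
+	 * both with the nominal pstate.
+	 */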
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMCR), &tmp);
+ tmp = tmp & ~0xFFFF000000000000ULL;
+ pstate = ((uint64_t) pstate_nom) & 0xFF;
+ tmp = tmp | (pstate << 56) | (pstate << 48);
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMCR), tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: Failed to write PM_GP1 in pstates init\n");
+ return false;
+ }
+ time_wait_ms(1); /* Wait for PState to change */
+ /*
+ * Init PM GP1 for SPR based PSTATE control.
+	 * Once OCC is active, EX_PM_SETUP_GP1_DPLL_FREQ_OVERRIDE_EN will be
+	 * cleared by OCC; Sapphire need not clear it.
+	 * However, wait for the DVFS state machine to become idle after the
+	 * min->nominal transition initiated above, otherwise the switchover
+	 * to SPR control could fail.
+ */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), &tmp);
+ tmp = tmp & ~EX_PM_SETUP_GP1_PM_SPR_OVERRIDE_EN;
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: Failed to write PM_GP1 in pstates init\n");
+ return false;
+ }
+
+ /* Just debug */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMSR), &tmp);
+ printf("OCC: Chip %x Core %x PPMSR %016llx\n", chip->id, core, tmp);
+
+ /*
+ * If PMSR is still in transition at this point due to PState change
+ * initiated above, then the switchover to SPR may not work.
+ * ToDo: Check for DVFS state machine idle before change.
+ */
+
+ return true;
+}
+
+/* CPU-OCC PState init */
+/* Called after OCC init on P8 */
+void occ_pstates_init(void)
+{
+ struct proc_chip *chip;
+ struct cpu_thread *c;
+ s8 pstate_nom;
+
+ /* OCC is P8 only */
+ if (proc_gen != proc_gen_p8)
+ return;
+
+ chip = next_chip(NULL);
+ if (!chip->homer_base) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+ "OCC: No HOMER detected, assuming no pstates\n");
+ return;
+ }
+
+ /* Wait for all OCC to boot up */
+	if (!wait_for_all_occ_init()) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+				"OCC: Not all OCCs initialized, timed out\n");
+ return;
+ }
+
+ /*
+ * Check boundary conditions and add device tree nodes
+ * and return nominal pstate to set for the core
+ */
+ if (!add_cpu_pstate_properties(&pstate_nom)) {
+ log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
+			"Skipping core cpufreq init due to OCC error\n");
+ return;
+ }
+
+ /* Setup host based pstates and set nominal frequency */
+ for_each_chip(chip) {
+ for_each_available_core_in_chip(c, chip->id) {
+ cpu_pstates_prepare_core(chip, c, pstate_nom);
+ }
+ }
+}
+
+static void occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id)
+{
+ struct fsp_msg *rsp, *stat;
+ int rc = -ENOMEM;
+ int status_word = 0;
+ struct proc_chip *chip = next_chip(NULL);
+ u8 err = 0;
+
+ /* Check arguments */
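+	/* Scope 0x01 targets a single DBOB, 0x02 the whole system;
+	 * anything else is rejected with status 0x22 below.
+	 */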
+ if (scope != 0x01 && scope != 0x02) {
+ prerror("OCC: Load message with invalid scope 0x%x\n",
+ scope);
+ err = 0x22;
+ }
+
+ /* First queue up an OK response to the load message itself */
+	rsp = fsp_mkmsg(FSP_RSP_LOAD_OCC | err, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
+ "OCC: Error %d queueing FSP OCC LOAD reply\n", rc);
+ return;
+ }
+
+ /* If we had an error, return */
+ if (err)
+ return;
+
+ /* Call HBRT... */
+ rc = host_services_occ_load();
+
+ /* Handle fallback to preload */
+ if (rc == -ENOENT && chip->homer_base) {
+ printf("OCC: Load: Fallback to preloaded image\n");
+ rc = 0;
+ } else if (!rc) {
+ /* Success, start OCC */
+ rc = host_services_occ_start();
+ }
+ if (rc) {
+		/* If either host services call fails, send a failure to the FSP */
+		/* Find a chip ID to report the failure against */
+ for_each_chip(chip) {
+ if (scope == 0x01 && dbob_id != chip->dbob_id)
+ continue;
+ status_word = 0xB500 | (chip->pcid & 0xff);
+ break;
+ }
+ log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
+			"OCC: Error %d in load/start OCC\n", rc);
+ }
+
+ /* Send a single response for all chips */
+ stat = fsp_mkmsg(FSP_CMD_LOAD_OCC_STAT, 2, status_word, seq_id);
+ if (stat)
+ rc = fsp_queue_msg(stat, fsp_freemsg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
+			"OCC: Error %d queueing FSP OCC LOAD STATUS msg\n", rc);
+ }
+}
+
+static void occ_do_reset(u8 scope, u32 dbob_id, u32 seq_id)
+{
+ struct fsp_msg *rsp, *stat;
+ struct proc_chip *chip = next_chip(NULL);
+ int rc = -ENOMEM;
+ u8 err = 0;
+
+ /* Check arguments */
+ if (scope != 0x01 && scope != 0x02) {
+ prerror("OCC: Reset message with invalid scope 0x%x\n",
+ scope);
+ err = 0x22;
+ }
+
+ /* First queue up an OK response to the reset message itself */
+	rsp = fsp_mkmsg(FSP_RSP_RESET_OCC | err, 0);
+ if (rsp)
+ rc = fsp_queue_msg(rsp, fsp_freemsg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_RESET),
+ "OCC: Error %d queueing FSP OCC RESET reply\n", rc);
+ return;
+ }
+
+ /* If we had an error, return */
+ if (err)
+ return;
+
+ /* Call HBRT... */
+ rc = host_services_occ_start();
+
+ /* Handle fallback to preload */
+ if (rc == -ENOENT && chip->homer_base) {
+ printf("OCC: Reset: Fallback to preloaded image\n");
+ rc = 0;
+ }
+ if (!rc) {
+ /* Send a single success response for all chips */
+ stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2, 0, seq_id);
+ if (stat)
+ rc = fsp_queue_msg(stat, fsp_freemsg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_RESET),
+ "OCC: Error %d queueing FSP OCC RESET"
+ " STATUS message\n", rc);
+ }
+ } else {
+ /*
+ * Then send a matching OCC Reset Status message with an 0xFE
+ * (fail) response code as well to the first matching chip
+ */
+ for_each_chip(chip) {
+ if (scope == 0x01 && dbob_id != chip->dbob_id)
+ continue;
+ rc = -ENOMEM;
+ stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2,
+ 0xfe00 | (chip->pcid & 0xff), seq_id);
+ if (stat)
+ rc = fsp_queue_msg(stat, fsp_freemsg);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_OCC_RESET),
+ "OCC: Error %d queueing FSP OCC RESET"
+ " STATUS message\n", rc);
+ }
+ break;
+ }
+ }
+}
+
+static bool fsp_occ_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
+{
+ u32 dbob_id, seq_id;
+ u8 scope;
+
+ switch (cmd_sub_mod) {
+ case FSP_CMD_LOAD_OCC:
+ /*
+ * We get the "Load OCC" command at boot. We don't currently
+ * support loading it ourselves (we don't have the procedures,
+ * they will come with Host Services). For now HostBoot will
+		 * have loaded an OCC firmware for us, but we still need to
+		 * be nice and respond to the FSP.
+ */
+ scope = msg->data.bytes[3];
+ dbob_id = msg->data.words[1];
+ seq_id = msg->data.words[2];
+ printf("OCC: Got OCC Load message, scope=0x%x dbob=0x%x"
+ " seq=0x%x\n", scope, dbob_id, seq_id);
+ occ_do_load(scope, dbob_id, seq_id);
+ return true;
+
+ case FSP_CMD_RESET_OCC:
+ /*
+ * We shouldn't be getting this one, but if we do, we have
+ * to reply something sensible or the FSP will get upset
+ */
+ scope = msg->data.bytes[3];
+ dbob_id = msg->data.words[1];
+ seq_id = msg->data.words[2];
+ printf("OCC: Got OCC Reset message, scope=0x%x dbob=0x%x"
+ " seq=0x%x\n", scope, dbob_id, seq_id);
+ occ_do_reset(scope, dbob_id, seq_id);
+ return true;
+ }
+ return false;
+}
+
+static struct fsp_client fsp_occ_client = {
+ .message = fsp_occ_msg,
+};
+
+void occ_fsp_init(void)
+{
+ /* OCC is P8 only */
+ if (proc_gen != proc_gen_p8)
+ return;
+
+ /* If we have an FSP, register for notifications */
+ if (fsp_present())
+ fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);
+}
diff --git a/hw/p5ioc2-phb.c b/hw/p5ioc2-phb.c
new file mode 100644
index 0000000..b52fe44
--- /dev/null
+++ b/hw/p5ioc2-phb.c
@@ -0,0 +1,1233 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <p5ioc2.h>
+#include <p5ioc2-regs.h>
+#include <io.h>
+#include <timebase.h>
+#include <affinity.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <interrupts.h>
+#include <ccan/str/str.h>
+
+static void p5ioc2_phb_trace(struct p5ioc2_phb *p, FILE *s, const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+
+static void p5ioc2_phb_trace(struct p5ioc2_phb *p, FILE *s, const char *fmt, ...)
+{
+ /* Use a temp stack buffer to print all at once to avoid
+	 * interleaved trace entries on SMP
+ */
+ char tbuf[128 + 10];
+ va_list args;
+ char *b = tbuf;
+
+ b += sprintf(b, "PHB%d: ", p->phb.opal_id);
+ va_start(args, fmt);
+ vsnprintf(b, 128, fmt, args);
+ va_end(args);
+ fputs(tbuf, s);
+}
+#define PHBDBG(p, fmt...) p5ioc2_phb_trace(p, stdout, fmt)
+#define PHBERR(p, fmt...) p5ioc2_phb_trace(p, stderr, fmt)
+
+/* Helper to set the state machine timeout */
+static inline uint64_t p5ioc2_set_sm_timeout(struct p5ioc2_phb *p, uint64_t dur)
+{
+ uint64_t target, now = mftb();
+
+ target = now + dur;
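+	/* delay_tgt_tb == 0 means "not armed" in p5ioc2_poll(), so never
+	 * use it as a target
+	 */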
+ if (target == 0)
+ target++;
+ p->delay_tgt_tb = target;
+
+ return dur;
+}
+
+/*
+ * Lock callbacks. Allows the OPAL API handlers to lock the
+ * PHB around calls such as config space, EEH, etc...
+ */
+static void p5ioc2_phb_lock(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ lock(&p->lock);
+}
+
+static void p5ioc2_phb_unlock(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ unlock(&p->lock);
+}
+
+/*
+ * Configuration space access
+ *
+ * The PHB lock is assumed to be already held
+ */
+static int64_t p5ioc2_pcicfg_address(struct p5ioc2_phb *p, uint32_t bdfn,
+ uint32_t offset, uint32_t size)
+{
+ uint32_t addr, sm = size - 1;
+
+ if (bdfn > 0xffff)
+ return OPAL_PARAMETER;
+ /* XXX Should we enable 4K config space on PCI-X 2.0 ? */
+ if ((offset > 0xff && !p->is_pcie) || offset > 0xfff)
+ return OPAL_PARAMETER;
+ if (offset & sm)
+ return OPAL_PARAMETER;
+
+ /* The root bus only has a device at 0 and we get into an
+ * error state if we try to probe beyond that, so let's
+ * avoid that and just return an error to Linux
+ */
+ if (p->is_pcie && (bdfn >> 8) == 0 && (bdfn & 0xff))
+ return OPAL_HARDWARE;
+
+ /* Prevent special operation generation */
+ if (((bdfn >> 3) & 0x1f) == 0x1f)
+ return OPAL_HARDWARE;
+
+ /* Check PHB state */
+ if (p->state == P5IOC2_PHB_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ /* Additionally, should we prevent writes to the PHB own
+ * bus number register ?
+ */
+
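+	/* Compose the config address: enable bit, BDFN in its field, the
+	 * low byte of the offset, and the extended offset bits (for 4K
+	 * PCIe config space) shifted above it
+	 */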
+ addr = CAP_PCADR_ENABLE | ((uint64_t)bdfn << CAP_PCADR_FUNC_LSH);
+ addr |= (offset & 0xff);
+ addr |= ((offset & 0xf00) << (CAP_PCADR_EXTOFF_LSH - 8));
+ out_le32(p->regs + CAP_PCADR, addr);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_read8(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t *data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ /* Initialize data in case of error */
+ *data = 0xff;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 1);
+ if (rc)
+ return rc;
+
+ *data = in_8(p->regs + CAP_PCDAT + (offset & 3));
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_read16(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t *data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ /* Initialize data in case of error */
+ *data = 0xffff;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 2);
+ if (rc)
+ return rc;
+
+ *data = in_le16(p->regs + CAP_PCDAT + (offset & 3));
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_read32(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t *data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ /* Initialize data in case of error */
+ *data = 0xffffffff;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 4);
+ if (rc)
+ return rc;
+
+ *data = in_le32(p->regs + CAP_PCDAT);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_write8(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 1);
+ if (rc)
+ return rc;
+
+ out_8(p->regs + CAP_PCDAT + (offset & 3), data);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_write16(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 2);
+ if (rc)
+ return rc;
+
+ out_le16(p->regs + CAP_PCDAT + (offset & 3), data);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_pcicfg_write32(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t data)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ int64_t rc;
+
+ rc = p5ioc2_pcicfg_address(p, bdfn, offset, 4);
+ if (rc)
+ return rc;
+
+ out_le32(p->regs + CAP_PCDAT, data);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_presence_detect(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint16_t slotstat;
+ int64_t rc;
+
+ if (!p->is_pcie) {
+ uint32_t lsr;
+
+ lsr = in_be32(p->regs + SHPC_LOGICAL_SLOT);
+ if (GETFIELD(SHPC_LOGICAL_SLOT_PRSNT, lsr)
+ != SHPC_SLOT_STATE_EMPTY)
+ return OPAL_SHPC_DEV_PRESENT;
+ else
+ return OPAL_SHPC_DEV_NOT_PRESENT;
+ }
+
+ rc = p5ioc2_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTSTAT,
+ &slotstat);
+ if (rc || !(slotstat & PCICAP_EXP_SLOTSTAT_PDETECTST))
+ return OPAL_SHPC_DEV_NOT_PRESENT;
+ return OPAL_SHPC_DEV_PRESENT;
+}
+
+static int64_t p5ioc2_link_state(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint16_t lstat;
+ int64_t rc;
+
+ /* XXX Test for PHB in error state ? */
+ if (!p->is_pcie)
+ return OPAL_SHPC_LINK_UP_x1;
+
+ rc = p5ioc2_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_LSTAT,
+ &lstat);
+ if (rc < 0) {
+ /* Shouldn't happen */
+ PHBERR(p, "Failed to read link status\n");
+ return OPAL_HARDWARE;
+ }
+ if (!(lstat & PCICAP_EXP_LSTAT_DLLL_ACT))
+ return OPAL_SHPC_LINK_DOWN;
+ return GETFIELD(PCICAP_EXP_LSTAT_WIDTH, lstat);
+}
+
+static int64_t p5ioc2_power_state(struct phb *phb __unused)
+{
+ /* XXX FIXME */
+#if 0
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint64_t reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+
+ /* XXX Test for PHB in error state ? */
+
+ if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
+ return OPAL_SHPC_POWER_ON;
+
+ return OPAL_SHPC_POWER_OFF;
+#else
+ return OPAL_SHPC_POWER_ON;
+#endif
+}
+
+/* p5ioc2_sm_slot_power_off - Slot power off state machine
+ */
+static int64_t p5ioc2_sm_slot_power_off(struct p5ioc2_phb *p)
+{
+ switch(p->state) {
+ default:
+ break;
+ }
+
+ /* Unknown state, hardware error ? */
+ return OPAL_HARDWARE;
+}
+
+static int64_t p5ioc2_slot_power_off(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p5ioc2_sm_slot_power_off(p);
+}
+
+static int64_t p5ioc2_sm_slot_power_on(struct p5ioc2_phb *p __unused)
+{
+#if 0
+ uint64_t reg;
+ uint32_t reg32;
+ uint16_t brctl;
+
+ switch(p->state) {
+ case P5IOC2_PHB_STATE_FUNCTIONAL:
+ /* Check presence */
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
+ PHBDBG(p, "Slot power on: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Adjust UTL interrupt settings to disable various
+ * errors that would interfere with the process
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
+
+ /* If the power is not on, turn it on now */
+ if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
+ reg = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
+ reg &= ~(0x8c00000000000000ul);
+ reg |= 0x8400000000000000ul;
+ out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
+ p->state = PHB_STATE_SPUP_STABILIZE_DELAY;
+ PHBDBG(p, "Slot power on: powering on...\n");
+ return p5ioc2_set_sm_timeout(p, secs_to_tb(2));
+ }
+ /* Power is already on */
+ power_ok:
+ /* Ensure hot reset is deasserted */
+ p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
+ p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ p->retries = 40;
+ p->state = PHB_STATE_SPUP_WAIT_LINK;
+ PHBDBG(p, "Slot power on: waiting for link\n");
+ /* Fall through */
+ case PHB_STATE_SPUP_WAIT_LINK:
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ /* Link is up ? Complete */
+
+ /* XXX TODO: Check link width problem and if present
+ * go straight to the host reset code path.
+ */
+ if (reg & PHB_PCIE_DLP_TC_DL_LINKACT) {
+ /* Restore UTL interrupts */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,
+ 0xfe65000000000000);
+ p->state = PHB_STATE_FUNCTIONAL;
+ PHBDBG(p, "Slot power on: up !\n");
+ return OPAL_SUCCESS;
+ }
+ /* Retries */
+ p->retries--;
+ if (p->retries == 0) {
+ /* XXX Improve logging */
+ PHBERR(p,"Slot power on: Timeout waiting for link\n");
+ goto error;
+ }
+ /* Check time elapsed */
+ if ((p->retries % 20) != 0)
+ return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
+
+ /* >200ms, time to try a hot reset after clearing the
+ * link status bit (doco says to do so)
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x0080000000000000);
+
+ /* Mask receiver error status in AER */
+ p5ioc2_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &reg32);
+ reg32 |= PCIECAP_AER_CE_RECVR_ERR;
+ p5ioc2_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, reg32);
+
+ /* Turn on host reset */
+ p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl |= PCI_CFG_BRCTL_SECONDARY_RESET;
+ p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ p->state = PHB_STATE_SPUP_HOT_RESET_DELAY;
+ PHBDBG(p, "Slot power on: soft reset...\n");
+ return p5ioc2_set_sm_timeout(p, secs_to_tb(1));
+ case PHB_STATE_SPUP_HOT_RESET_DELAY:
+ /* Turn off host reset */
+ p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
+ p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ /* Clear spurious errors */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00e0000000000000);
+ p5ioc2_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_STATUS,
+ PCIECAP_AER_CE_RECVR_ERR);
+ /* Unmask receiver error status in AER */
+ p5ioc2_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &reg32);
+ reg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
+ p5ioc2_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, reg32);
+ /* Go back to waiting for link */
+ p->state = PHB_STATE_SPUP_WAIT_LINK;
+ PHBDBG(p, "Slot power on: waiting for link (2)\n");
+ return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
+
+ case PHB_STATE_SPUP_STABILIZE_DELAY:
+ /* Come here after the 2s delay after power up */
+ p->retries = 1000;
+ p->state = PHB_STATE_SPUP_SLOT_STATUS;
+ PHBDBG(p, "Slot power on: waiting for power\n");
+ /* Fall through */
+ case PHB_STATE_SPUP_SLOT_STATUS:
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+
+		/* Doc says to check LED status, but we ignore that; there's
+		 * no point really and it's easier that way
+ */
+ if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
+ goto power_ok;
+ if (p->retries-- == 0) {
+ /* XXX Improve error logging */
+ PHBERR(p, "Timeout powering up slot\n");
+ goto error;
+ }
+ return p5ioc2_set_sm_timeout(p, msecs_to_tb(10));
+ default:
+ break;
+ }
+
+ /* Unknown state, hardware error ? */
+ error:
+ p->state = PHB_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+#else
+ return OPAL_SUCCESS;
+#endif
+}
+
+static int64_t p5ioc2_slot_power_on(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p5ioc2_sm_slot_power_on(p);
+}
+
+static int64_t p5ioc2_sm_hot_reset(struct p5ioc2_phb *p)
+{
+ switch(p->state) {
+ default:
+ break;
+ }
+
+ /* Unknown state, hardware error ? */
+ return OPAL_HARDWARE;
+}
+
+static int64_t p5ioc2_hot_reset(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p5ioc2_sm_hot_reset(p);
+}
+
+static int64_t p5ioc2_sm_freset(struct p5ioc2_phb *p)
+{
+ switch(p->state) {
+ default:
+ break;
+ }
+
+ /* XXX Not implemented, return success to make
+ * pci.c happy, otherwise probing of slots will
+ * fail
+ */
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_freset(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+
+ if (p->state != P5IOC2_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p5ioc2_sm_freset(p);
+}
+
+static int64_t p5ioc2_poll(struct phb *phb)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint64_t now = mftb();
+
+ if (p->state == P5IOC2_PHB_STATE_FUNCTIONAL)
+ return OPAL_SUCCESS;
+
+ /* Check timer */
+ if (p->delay_tgt_tb &&
+ tb_compare(now, p->delay_tgt_tb) == TB_ABEFOREB)
+ return p->delay_tgt_tb - now;
+
+ /* Expired (or not armed), clear it */
+ p->delay_tgt_tb = 0;
+
+#if 0
+ /* Dispatch to the right state machine */
+ switch(p->state) {
+ case PHB_STATE_SPUP_STABILIZE_DELAY:
+ case PHB_STATE_SPUP_SLOT_STATUS:
+ case PHB_STATE_SPUP_WAIT_LINK:
+ case PHB_STATE_SPUP_HOT_RESET_DELAY:
+ return p5ioc2_sm_slot_power_on(p);
+ case PHB_STATE_SPDOWN_STABILIZE_DELAY:
+ case PHB_STATE_SPDOWN_SLOT_STATUS:
+ return p5ioc2_sm_slot_power_off(p);
+ case PHB_STATE_HRESET_DELAY:
+ return p5ioc2_sm_hot_reset(p);
+ default:
+ break;
+ }
+#endif
+ /* Unknown state, could be a HW error */
+ return OPAL_HARDWARE;
+}
+
+static int64_t p5ioc2_eeh_freeze_status(struct phb *phb, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint16_t *severity,
+ uint64_t *phb_status __unused)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint32_t cfgrw;
+
+ /* Defaults: not frozen */
+ *freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ if (severity)
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+
+ if (pe_number != 0)
+ return OPAL_PARAMETER;
+
+ /* XXX Handle PHB status */
+ /* XXX We currently only check for PE freeze, not fence */
+
+ cfgrw = in_be32(p->regs + CAP_PCFGRW);
+ if (cfgrw & CAP_PCFGRW_MMIO_FROZEN)
+ *freeze_state |= OPAL_EEH_STOPPED_MMIO_FREEZE;
+ if (cfgrw & CAP_PCFGRW_DMA_FROZEN)
+ *freeze_state |= OPAL_EEH_STOPPED_DMA_FREEZE;
+
+	if (severity &&
+	    (cfgrw & (CAP_PCFGRW_MMIO_FROZEN | CAP_PCFGRW_DMA_FROZEN)))
+ *severity = OPAL_EEH_SEV_PE_ER;
+
+ /* XXX Don't bother populating pci_error_type */
+ /* Should read the bits from PLSSR */
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_eeh_next_error(struct phb *phb, uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type, uint16_t *severity)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint32_t cfgrw;
+
+ /* XXX Don't bother */
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *first_frozen_pe = 0;
+
+ cfgrw = in_be32(p->regs + CAP_PCFGRW);
+	if (cfgrw & (CAP_PCFGRW_MMIO_FROZEN | CAP_PCFGRW_DMA_FROZEN))
+ *severity = OPAL_EEH_SEV_PE_ER;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_eeh_freeze_clear(struct phb *phb, uint64_t pe_number,
+ uint64_t eeh_action_token)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint32_t cfgrw;
+
+ if (pe_number != 0)
+ return OPAL_PARAMETER;
+
+ /*
+	 * This sequence isn't very well documented. We play guessing
+	 * games based on the documentation, what we do on P7IOC,
+ * and common sense.
+ *
+ * Basically we start from the low level (UTL), clear all
+ * error conditions there. Then we clear error conditions
+ * in the PLSSR and DMACSR.
+ *
+ * Once that's done, we unfreeze the PHB
+ *
+ * Note: Should we also clear the error bits in the config
+ * space ? The docs don't say anything... TODO: Check what
+ * OPAL does if possible or ask Milton.
+ */
+
+ /* Clear UTL error regs on PCIe */
+ if (p->is_pcie) {
+ uint32_t err;
+
+ err = in_be32(p->regs + UTL_SYS_BUS_AGENT_STATUS);
+ out_be32(p->regs + UTL_SYS_BUS_AGENT_STATUS, err);
+ err = in_be32(p->regs + UTL_PCIE_PORT_STATUS);
+ out_be32(p->regs + UTL_PCIE_PORT_STATUS, err);
+ err = in_be32(p->regs + UTL_RC_STATUS);
+ out_be32(p->regs + UTL_RC_STATUS, err);
+ }
+
+ /* XXX We should probably clear the error regs in the cfg space... */
+
+ /* Clear PLSSR and DMACSR */
+ out_be32(p->regs + CAP_DMACSR, 0);
+ out_be32(p->regs + CAP_PLSSR, 0);
+
+ /* Clear freeze state as requested */
+ cfgrw = in_be32(p->regs + CAP_PCFGRW);
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO) {
+ cfgrw &= ~CAP_PCFGRW_MMIO_FROZEN;
+ out_be32(p->regs + CAP_PCFGRW, cfgrw);
+ }
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_DMA) {
+ cfgrw &= ~CAP_PCFGRW_DMA_FROZEN;
+ out_be32(p->regs + CAP_PCFGRW, cfgrw);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_get_msi_64(struct phb *phb __unused, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint64_t *msi_address, uint32_t *message_data)
+{
+ if (mve_number > 255 || xive_num > 255 || msi_range != 1)
+ return OPAL_PARAMETER;
+
+ *msi_address = 0x1000000000000000ul;
+ *message_data = xive_num;
+
+ return OPAL_SUCCESS;
+}
+
+static uint8_t p5ioc2_choose_bus(struct phb *phb __unused,
+ struct pci_device *bridge __unused,
+ uint8_t candidate, uint8_t *max_bus __unused,
+ bool *use_max)
+{
+ /* Use standard bus number selection */
+ *use_max = false;
+ return candidate;
+}
+
+/* p5ioc2_phb_ioda_reset - Reset the IODA tables
+ *
+ * This resets the IODA tables in the PHB. It is called at
+ * initialization time, on PHB reset, and can be called
+ * explicitly from OPAL
+ *
+ * Note: We don't handle EEH on p5ioc2, we use no cache
+ * and thus always purge
+ */
+static int64_t p5ioc2_ioda_reset(struct phb *phb, bool purge __unused)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ unsigned int i;
+
+ /* Init XIVRs */
+ for (i = 0; i < 16; i++) {
+ p->xive_cache[i] = SETFIELD(CAP_XIVR_PRIO, 0, 0xff);
+ out_be32(p->regs + CAP_XIVRn(i), 0x000000ff);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_set_phb_tce_memory(struct phb *phb,
+ uint64_t tce_mem_addr,
+ uint64_t tce_mem_size)
+{
+ struct p5ioc2_phb *p = phb_to_p5ioc2_phb(phb);
+ uint64_t tar;
+ uint32_t cfg;
+
+ printf("PHB%d: set_tce_memory: 0x%016llx 0x%016llx\n",
+ p->index, tce_mem_addr, tce_mem_size);
+ printf("PHB%d: bridge values : 0x%016llx 0x%016llx\n",
+ p->index, p->ioc->tce_base, p->ioc->tce_size);
+
+ /* First check if it fits in the memory established for
+ * the IO HUB
+ */
+ if (tce_mem_addr &&
+ (tce_mem_addr < p->ioc->tce_base ||
+ tce_mem_addr > (p->ioc->tce_base + p->ioc->tce_size) ||
+ (tce_mem_addr + tce_mem_size) >
+ (p->ioc->tce_base + p->ioc->tce_size))) {
+ prerror("PHB%d: TCEs not in bridge range\n", p->index);
+ return OPAL_PARAMETER;
+ }
+
+	/* Supported sizes are powers of two, naturally aligned, and
+	 * between 64K and 8M (p5ioc2 spec); e.g. a 1M table must sit
+	 * on a 1M boundary
+	 */
+ if (tce_mem_addr && !is_pow2(tce_mem_size)) {
+ prerror("PHB%d: Size is not a power of 2\n", p->index);
+ return OPAL_PARAMETER;
+ }
+ if (tce_mem_addr & (tce_mem_size - 1)) {
+ prerror("PHB%d: Not naturally aligned\n", p->index);
+ return OPAL_PARAMETER;
+ }
+ if (tce_mem_addr &&
+ (tce_mem_size < 0x10000 || tce_mem_size > 0x800000)) {
+ prerror("PHB%d: Size out of range\n", p->index);
+ return OPAL_PARAMETER;
+ }
+
+ /* First we disable TCEs in the bridge */
+ cfg = in_be32(p->regs + CAP_PCFGRW);
+ cfg &= ~CAP_PCFGRW_TCE_EN;
+ out_be32(p->regs + CAP_PCFGRW, cfg);
+
+ /* Now there's a blurb in the spec about all TARm needing
+	 * to have the same size... I will leave that as a surprise
+	 * for the user... Linux does it fine and I'd rather not
+ * keep more state to check than I need to
+ */
+ tar = 0;
+ if (tce_mem_addr) {
+ tar = SETFIELD(CA_TAR_HUBID, 0ul, p->ca ? 4 : 1);
+ tar = SETFIELD(CA_TAR_ALTHUBID, tar, p->ca ? 4 : 1);
+ tar = SETFIELD(CA_TAR_NUM_TCE, tar, ilog2(tce_mem_size) - 16);
+ tar |= tce_mem_addr; /* addr is naturally aligned */
+ tar |= CA_TAR_VALID;
+ printf("PHB%d: Writing TAR: 0x%016llx\n", p->index, tar);
+ }
+ out_be64(p->ca_regs + CA_TARn(p->index), tar);
+
+ /* Now set the TCE enable if we set a valid address */
+ if (tce_mem_addr) {
+ cfg |= CAP_PCFGRW_TCE_EN;
+ out_be32(p->regs + CAP_PCFGRW, cfg);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+
+static const struct phb_ops p5ioc2_phb_ops = {
+ .lock = p5ioc2_phb_lock,
+ .unlock = p5ioc2_phb_unlock,
+ .cfg_read8 = p5ioc2_pcicfg_read8,
+ .cfg_read16 = p5ioc2_pcicfg_read16,
+ .cfg_read32 = p5ioc2_pcicfg_read32,
+ .cfg_write8 = p5ioc2_pcicfg_write8,
+ .cfg_write16 = p5ioc2_pcicfg_write16,
+ .cfg_write32 = p5ioc2_pcicfg_write32,
+ .choose_bus = p5ioc2_choose_bus,
+ .eeh_freeze_status = p5ioc2_eeh_freeze_status,
+ .eeh_freeze_clear = p5ioc2_eeh_freeze_clear,
+ .next_error = p5ioc2_eeh_next_error,
+ .get_msi_64 = p5ioc2_get_msi_64,
+ .ioda_reset = p5ioc2_ioda_reset,
+ .set_phb_tce_memory = p5ioc2_set_phb_tce_memory,
+ .presence_detect = p5ioc2_presence_detect,
+ .link_state = p5ioc2_link_state,
+ .power_state = p5ioc2_power_state,
+ .slot_power_off = p5ioc2_slot_power_off,
+ .slot_power_on = p5ioc2_slot_power_on,
+ .hot_reset = p5ioc2_hot_reset,
+ .fundamental_reset = p5ioc2_freset,
+ .poll = p5ioc2_poll,
+};
+
+/* p5ioc2_phb_get_xive - Interrupt control from OPAL */
+static int64_t p5ioc2_phb_get_xive(void *data, uint32_t isn,
+ uint16_t *server, uint8_t *prio)
+{
+ struct p5ioc2_phb *p = data;
+ uint32_t irq, xivr, fbuid = P7_IRQ_FBUID(isn);
+
+ if (fbuid != p->buid)
+ return OPAL_PARAMETER;
+ irq = isn & 0xf;
+
+ xivr = p->xive_cache[irq];
+ *server = GETFIELD(CAP_XIVR_SERVER, xivr);
+ *prio = GETFIELD(CAP_XIVR_PRIO, xivr);
+
+ return OPAL_SUCCESS;
+}
+
+/* p5ioc2_phb_set_xive - Interrupt control from OPAL */
+static int64_t p5ioc2_phb_set_xive(void *data, uint32_t isn,
+ uint16_t server, uint8_t prio)
+{
+ struct p5ioc2_phb *p = data;
+ uint32_t irq, xivr, fbuid = P7_IRQ_FBUID(isn);
+
+ if (fbuid != p->buid)
+ return OPAL_PARAMETER;
+ irq = isn & 0xf;
+
+ printf("PHB%d: Set XIVE isn %04x (irq=%d) server=%x, prio=%x\n",
+ p->index, isn, irq, server, prio);
+
+ xivr = SETFIELD(CAP_XIVR_SERVER, 0, server);
+ xivr = SETFIELD(CAP_XIVR_PRIO, xivr, prio);
+ p->xive_cache[irq] = xivr;
+
+ /* Now we mangle the server and priority */
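+	/* Fold the low 3 server bits into the top of the priority field
+	 * and drop them from the server; priority 0xff (masked) passes
+	 * through unchanged.
+	 */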
+ if (prio == 0xff) {
+ server = 0;
+ prio = 0xff;
+ } else {
+ prio = (prio >> 3) | ((server & 7) << 5);
+ server = server >> 3;
+ }
+
+ /* We use HRT entry 0 always for now */
+ xivr = SETFIELD(CAP_XIVR_SERVER, 0, server);
+ xivr = SETFIELD(CAP_XIVR_PRIO, xivr, prio);
+ out_be32(p->regs + CAP_XIVRn(irq), xivr);
+ printf("PHB%d: wrote 0x%08x to XIVR %d\n", p->index, xivr, irq);
+
+ return OPAL_SUCCESS;
+}
+
+/* IRQ ops for OS interrupts (not internal) */
+static const struct irq_source_ops p5ioc2_phb_os_irq_ops = {
+ .get_xive = p5ioc2_phb_get_xive,
+ .set_xive = p5ioc2_phb_set_xive,
+};
+
+
+static void p5ioc2_phb_init_utl(struct p5ioc2_phb *p __unused)
+{
+ /* XXX FIXME */
+}
+
+static void p5ioc2_phb_init_pcie(struct p5ioc2_phb *p)
+{
+ int64_t ecap, aercap;
+
+ ecap = pci_find_cap(&p->phb, 0, PCI_CFG_CAP_ID_EXP);
+ if (ecap < 0) {
+ /* Shouldn't happen */
+ prerror("P5IOC2: Failed to locate PCI-E cap in bridge\n");
+ return;
+ }
+ p->ecap = ecap;
+
+ aercap = pci_find_ecap(&p->phb, 0, PCIECAP_ID_AER, NULL);
+ if (aercap < 0) {
+ /* Shouldn't happen */
+ prerror("P5IOC2: Failed to locate AER ext cap in bridge\n");
+ return;
+ }
+ p->aercap = aercap;
+
+ /* XXX plenty more to do ... */
+}
+
+static void p5ioc2_phb_hwinit(struct p5ioc2_phb *p)
+{
+ uint16_t pcicmd;
+ uint32_t phbid;
+
+ printf("P5IOC2: Initializing PHB HW...\n");
+
+	/* Enable PHB and disable address decoding */
+ phbid = in_be32(p->ca_regs + CA_PHBIDn(p->index));
+ phbid |= CA_PHBID_PHB_ENABLE;
+ phbid &= ~CA_PHBID_ADDRSPACE_ENABLE;
+ out_be32(p->ca_regs + CA_PHBIDn(p->index), phbid);
+
+ /* Set BUID */
+ out_be32(p->regs + CAP_BUID, SETFIELD(CAP_BUID, 0,
+ P7_BUID_BASE(p->buid)));
+ out_be32(p->regs + CAP_MSIBASE, P7_BUID_BASE(p->buid) << 16);
+
+ /* Set IO and Memory mapping */
+ out_be32(p->regs + CAP_IOAD_H, hi32(p->io_base + IO_PCI_START));
+ out_be32(p->regs + CAP_IOAD_L, lo32(p->io_base + IO_PCI_START));
+ out_be32(p->regs + CAP_IOSZ, ~(IO_PCI_SIZE - 1));
+ out_be32(p->regs + CAP_IO_ST, IO_PCI_START);
+ out_be32(p->regs + CAP_MEM1_H, hi32(p->mm_base + MM_PCI_START));
+ out_be32(p->regs + CAP_MEM1_L, lo32(p->mm_base + MM_PCI_START));
+ out_be32(p->regs + CAP_MSZ1, ~(MM_PCI_SIZE - 1));
+ out_be32(p->regs + CAP_MEM_ST, MM_PCI_START);
+
+	/* Set up the MODE registers. We capture the values used
+ * by pHyp/OPAL
+ */
+ out_be32(p->regs + CAP_MODE0, 0x00800010);
+ out_be32(p->regs + CAP_MODE1, 0x00800000);
+ out_be32(p->regs + CAP_MODE3, 0xFFC00050);
+ if (p->is_pcie)
+ out_be32(p->regs + CAP_MODE2, 0x00000400);
+ else
+ out_be32(p->regs + CAP_MODE2, 0x00000408);
+
+ /* XXX Setup of the arbiter... not sure what to do here,
+	 * probably system specific (depends on how things are
+	 * wired on the motherboard). I set things up based on
+	 * the values I read on a Juno machine. We set up the BPR
+	 * with the various timeouts etc... as well, based on
+	 * similarly captured values
+ */
+ if (p->is_pcie) {
+ out_be32(p->regs + CAP_AER, 0x04000000);
+ out_be32(p->regs + CAP_BPR, 0x0000004f);
+ } else {
+ out_be32(p->regs + CAP_AER, 0x84000000);
+ out_be32(p->regs + CAP_BPR, 0x000f00ff);
+ }
+
+ /* XXX Setup error reporting registers */
+
+ /* Clear errors in PLSSR and DMACSR */
+ out_be32(p->regs + CAP_DMACSR, 0);
+ out_be32(p->regs + CAP_PLSSR, 0);
+
+ /* Configure MSIs on PCIe only */
+ if (p->is_pcie) {
+ /* XXX Check that setting ! That's what OPAL uses but
+ * I suspect it might not be correct. We enable a masking
+ * of 3 bits and no offset, which makes me think only
+ * some MSIs will work... not 100% certain.
+ */
+ out_be32(p->regs + CAP_MVE0, CAP_MVE_VALID |
+ SETFIELD(CAP_MVE_TBL_OFF, 0, 0) |
+ SETFIELD(CAP_MVE_NUM_INT, 0, 0x3));
+ out_be32(p->regs + CAP_MVE1, 0);
+ }
+
+ /* Configuration. We keep TCEs disabled */
+ out_be32(p->regs + CAP_PCFGRW,
+ CAP_PCFGRW_ERR_RECOV_EN |
+ CAP_PCFGRW_FREEZE_EN |
+ CAP_PCFGRW_DAC_DISABLE |
+ (p->is_pcie ? CAP_PCFGRW_MSI_EN : 0));
+
+ /* Re-enable address decode */
+ phbid |= CA_PHBID_ADDRSPACE_ENABLE;
+ out_be32(p->ca_regs + CA_PHBIDn(p->index), phbid);
+
+ /* PCIe specific inits */
+ if (p->is_pcie) {
+ p5ioc2_phb_init_utl(p);
+ p5ioc2_phb_init_pcie(p);
+ }
+
+ /* Take out reset pins on PCI-X. PCI-E will be handled via the hotplug
+ * controller separately
+ */
+ if (!p->is_pcie) {
+ uint32_t val;
+
+ /* Setting 1's will deassert the reset signals */
+ out_be32(p->regs + CAP_CRR, CAP_CRR_RESET1 | CAP_CRR_RESET2);
+
+ /* Set max sub bus */
+ p5ioc2_pcicfg_write8(&p->phb, 0, 0x41, 0xff);
+
+ /* XXX SHPC stuff */
+ printf("P5IOC2: SHPC Slots available 1 : %08x\n",
+ in_be32(p->regs + 0xb20));
+ printf("P5IOC2: SHPC Slots available 2 : %08x\n",
+ in_be32(p->regs + 0xb24));
+ printf("P5IOC2: SHPC Slots config : %08x\n",
+ in_be32(p->regs + 0xb28));
+ printf("P5IOC2: SHPC Secondary bus conf : %08x\n",
+ in_be32(p->regs + 0xb2c));
+
+ p5ioc2_pcicfg_read32(&p->phb, 0, 0, &val);
+ printf("P5IOC2: val0: %08x\n", val);
+ p5ioc2_pcicfg_read32(&p->phb, 0, 4, &val);
+ printf("P5IOC2: val4: %08x\n", val);
+ }
+
+ /* Enable PCI command/status */
+ p5ioc2_pcicfg_read16(&p->phb, 0, PCI_CFG_CMD, &pcicmd);
+ pcicmd |= PCI_CFG_CMD_IO_EN | PCI_CFG_CMD_MEM_EN |
+ PCI_CFG_CMD_BUS_MASTER_EN;
+ p5ioc2_pcicfg_write16(&p->phb, 0, PCI_CFG_CMD, pcicmd);
+
+ p->state = P5IOC2_PHB_STATE_FUNCTIONAL;
+}
+
+static void p5ioc2_pcie_add_node(struct p5ioc2_phb *p)
+{
+ uint64_t reg[2], mmb, iob;
+ uint32_t lsibase, icsp = get_ics_phandle();
+ struct dt_node *np;
+
+ reg[0] = cleanup_addr((uint64_t)p->regs);
+ reg[1] = 0x1000;
+
+ np = dt_new_addr(p->ioc->dt_node, "pciex", reg[0]);
+ if (!np)
+ return;
+
+ p->phb.dt_node = np;
+ dt_add_property_strings(np, "compatible", "ibm,p5ioc2-pciex");
+ dt_add_property_strings(np, "device_type", "pciex");
+ dt_add_property(np, "reg", reg, sizeof(reg));
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+ dt_add_property_cells(np, "bus-range", 0, 0xff);
+ dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
+ dt_add_property_cells(np, "interrupt-parent", icsp);
+ /* XXX FIXME: add phb own interrupts */
+ dt_add_property_cells(np, "ibm,opal-num-pes", 1);
+ dt_add_property_cells(np, "ibm,opal-msi-ranges", (p->buid << 4) + 5, 8);
+ /* XXX FIXME: add slot-name */
+ iob = cleanup_addr(p->io_base + IO_PCI_START);
+ mmb = cleanup_addr(p->mm_base + MM_PCI_START);
+ dt_add_property_cells(np, "ranges",
+ /* IO space */
+ 0x01000000, 0x00000000, 0x00000000,
+ hi32(iob), lo32(iob), 0, IO_PCI_SIZE,
+ /* M32 space */
+ 0x02000000, 0x00000000, MM_PCI_START,
+ hi32(mmb), lo32(mmb), 0, MM_PCI_SIZE);
+
+ /* Add associativity properties */
+ add_chip_dev_associativity(np);
+
+ /* The interrupt maps will be generated in the RC node by the
+ * PCI code based on the content of this structure:
+ */
+ lsibase = p->buid << 4;
+ p->phb.lstate.int_size = 1;
+ p->phb.lstate.int_val[0][0] = lsibase + 1;
+ p->phb.lstate.int_val[1][0] = lsibase + 2;
+ p->phb.lstate.int_val[2][0] = lsibase + 3;
+ p->phb.lstate.int_val[3][0] = lsibase + 4;
+ p->phb.lstate.int_parent[0] = icsp;
+ p->phb.lstate.int_parent[1] = icsp;
+ p->phb.lstate.int_parent[2] = icsp;
+ p->phb.lstate.int_parent[3] = icsp;
+
+ /* reset clear timestamp... to add if we do a reset and want
+ * to avoid waiting in skiboot
+ */
+ //dt_property_cells("reset-clear-timestamp",....
+}
+
+static void p5ioc2_pcix_add_node(struct p5ioc2_phb *p)
+{
+ uint64_t reg[2], mmb, iob;
+ uint32_t lsibase, icsp = get_ics_phandle();
+ struct dt_node *np;
+
+ reg[0] = cleanup_addr((uint64_t)p->regs);
+ reg[1] = 0x1000;
+
+ np = dt_new_addr(p->ioc->dt_node, "pci", reg[0]);
+ if (!np)
+ return;
+
+ p->phb.dt_node = np;
+ dt_add_property_strings(np, "compatible", "ibm,p5ioc2-pcix");
+ dt_add_property_strings(np, "device_type", "pci");
+ dt_add_property(np, "reg", reg, sizeof(reg));
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+ dt_add_property_cells(np, "bus-range", 0, 0xff);
+ dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
+ //dt_add_property_cells(np, "bus-width", 8); /* Figure out from VPD ? */
+ dt_add_property_cells(np, "interrupt-parent", icsp);
+ /* XXX FIXME: add phb own interrupts */
+ dt_add_property_cells(np, "ibm,opal-num-pes", 1);
+ /* XXX FIXME: add slot-name */
+ iob = cleanup_addr(p->io_base + IO_PCI_START);
+ mmb = cleanup_addr(p->mm_base + MM_PCI_START);
+ dt_add_property_cells(np, "ranges",
+ /* IO space */
+ 0x01000000, 0x00000000, 0x00000000,
+ hi32(iob), lo32(iob), 0, IO_PCI_SIZE,
+ /* M32 space */
+ 0x02000000, 0x00000000, MM_PCI_START,
+ hi32(mmb), lo32(mmb), 0, MM_PCI_SIZE);
+
+ /* Add associativity properties */
+ add_chip_dev_associativity(np);
+
+ /* The interrupt maps will be generated in the RC node by the
+ * PCI code based on the content of this structure:
+ */
+ lsibase = p->buid << 4;
+ p->phb.lstate.int_size = 1;
+ p->phb.lstate.int_val[0][0] = lsibase + 1;
+ p->phb.lstate.int_val[1][0] = lsibase + 2;
+ p->phb.lstate.int_val[2][0] = lsibase + 3;
+ p->phb.lstate.int_val[3][0] = lsibase + 4;
+ p->phb.lstate.int_parent[0] = icsp;
+ p->phb.lstate.int_parent[1] = icsp;
+ p->phb.lstate.int_parent[2] = icsp;
+ p->phb.lstate.int_parent[3] = icsp;
+
+ /* On PCI-X we need to create an interrupt map here */
+ pci_std_swizzle_irq_map(np, NULL, &p->phb.lstate, 0);
+}
+
+void p5ioc2_phb_setup(struct p5ioc2 *ioc, struct p5ioc2_phb *p,
+ uint8_t ca, uint8_t index, bool active,
+ uint32_t buid)
+{
+ uint32_t phbid;
+
+ p->index = index;
+ p->ca = ca;
+ p->ioc = ioc;
+ p->active = active;
+ p->phb.ops = &p5ioc2_phb_ops;
+ p->buid = buid;
+ p->ca_regs = ca ? ioc->ca1_regs : ioc->ca0_regs;
+ p->regs = p->ca_regs + CA_PHBn_REGS(index);
+
+ printf("P5IOC2: Initializing PHB %d on CA%d, regs @%p, BUID 0x%04x\n",
+ p->index, p->ca, p->regs, p->buid);
+
+ /* Memory map: described in p5ioc2.h */
+ p->mm_base = ca ? ioc->ca1_mm_region : ioc->ca0_mm_region;
+ p->mm_base += MM_WINDOW_SIZE * index;
+ p->io_base = (uint64_t)p->ca_regs;
+ p->io_base += IO_PCI_SIZE * (index + 1);
+ p->state = P5IOC2_PHB_STATE_UNINITIALIZED;
+
+ /* Query PHB type */
+ phbid = in_be32(p->ca_regs + CA_PHBIDn(p->index));
+
+ switch(GETFIELD(CA_PHBID_PHB_TYPE, phbid)) {
+ case CA_PHBTYPE_PCIX1_0:
+ p->is_pcie = false;
+ p->phb.scan_map = 0x0003;
+ p->phb.phb_type = phb_type_pcix_v1;
+ printf("P5IOC2: PHB is PCI/PCI-X 1.0\n");
+ break;
+ case CA_PHBTYPE_PCIX2_0:
+ p->is_pcie = false;
+ p->phb.scan_map = 0x0003;
+ p->phb.phb_type = phb_type_pcix_v2;
+ printf("P5IOC2: PHB is PCI/PCI-X 2.0\n");
+ break;
+ case CA_PHBTYPE_PCIE_G1:
+ p->is_pcie = true;
+ p->phb.scan_map = 0x0001;
+ p->phb.phb_type = phb_type_pcie_v1;
+ printf("P5IOC2: PHB is PCI Express Gen 1\n");
+ break;
+ case CA_PHBTYPE_PCIE_G2:
+ p->is_pcie = true;
+ p->phb.scan_map = 0x0001;
+ p->phb.phb_type = phb_type_pcie_v2;
+ printf("P5IOC2: PHB is PCI Express Gen 2\n");
+ break;
+ default:
+ printf("P5IOC2: Unknown PHB type ! phbid=%08x\n", phbid);
+ p->is_pcie = true;
+ p->phb.scan_map = 0x0001;
+ p->phb.phb_type = phb_type_pcie_v1;
+ }
+
+ /* Find P5IOC2 base location code in IOC */
+ p->phb.base_loc_code = dt_prop_get_def(ioc->dt_node,
+ "ibm,io-base-loc-code", NULL);
+ if (!p->phb.base_loc_code)
+ prerror("P5IOC2: Base location code not found !\n");
+
+ /* Add device nodes */
+ if (p->is_pcie)
+ p5ioc2_pcie_add_node(p);
+ else
+ p5ioc2_pcix_add_node(p);
+
+ /* Initialize PHB HW */
+ p5ioc2_phb_hwinit(p);
+
+ /* Register all 16 interrupt sources for now as OS visible
+ *
+ * If we ever add some EEH, we might take out the error interrupts
+ * and register them as OPAL internal interrupts instead
+ */
+ register_irq_source(&p5ioc2_phb_os_irq_ops, p, p->buid << 4, 16);
+
+ /* We cannot query the PHB type yet as the registers aren't routed
+ * so we'll do that in the inits, at which point we'll establish
+ * the scan map
+ */
+
+ /* We register the PHB before we initialize it so we
+ * get a useful OPAL ID for it
+ */
+ pci_register_phb(&p->phb);
+
+ /* Platform additional setup */
+ if (platform.pci_setup_phb)
+ platform.pci_setup_phb(&p->phb, p->index);
+}
+
diff --git a/hw/p5ioc2.c b/hw/p5ioc2.c
new file mode 100644
index 0000000..d8b9591
--- /dev/null
+++ b/hw/p5ioc2.c
@@ -0,0 +1,297 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <p5ioc2.h>
+#include <p5ioc2-regs.h>
+#include <cec.h>
+#include <gx.h>
+#include <opal.h>
+#include <interrupts.h>
+#include <device.h>
+#include <timebase.h>
+#include <vpd.h>
+#include <ccan/str/str.h>
+
+
+static int64_t p5ioc2_set_tce_mem(struct io_hub *hub, uint64_t address,
+ uint64_t size)
+{
+ struct p5ioc2 *ioc = iohub_to_p5ioc2(hub);
+ int64_t rc;
+
+ printf("P5IOC2: set_tce_mem(0x%016llx size 0x%llx)\n",
+ address, size);
+
+ /* The address passed must be naturally aligned */
+ if (address && !is_pow2(size))
+ return OPAL_PARAMETER;
+ if (address & (size - 1))
+ return OPAL_PARAMETER;
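+	/* i.e. the size must be a power of two and the address a
+	 * multiple of it, e.g. a 256M table on a 256M boundary
+	 */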
+
+ ioc->tce_base = address;
+ ioc->tce_size = size;
+
+ rc = gx_configure_tce_bar(ioc->host_chip, ioc->gx_bus,
+ address, size);
+ if (rc)
+ return OPAL_INTERNAL_ERROR;
+ return OPAL_SUCCESS;
+}
+
+static int64_t p5ioc2_get_diag_data(struct io_hub *hub __unused,
+ void *diag_buffer __unused,
+ uint64_t diag_buffer_len __unused)
+{
+ /* XXX Not yet implemented */
+ return OPAL_UNSUPPORTED;
+}
+
+static const struct io_hub_ops p5ioc2_hub_ops = {
+ .set_tce_mem = p5ioc2_set_tce_mem,
+ .get_diag_data = p5ioc2_get_diag_data,
+};
+
+static void p5ioc2_inits(struct p5ioc2 *ioc)
+{
+ uint64_t val;
+ unsigned int p, n;
+
+ printf("P5IOC2: Initializing hub...\n");
+
+ /*
+ * BML base inits
+ */
+ /* mask off interrupt presentation timeout in FIRMC */
+ out_be64(ioc->regs + (P5IOC2_FIRMC | P5IOC2_REG_OR),
+ 0x0000080000000000);
+
+ /* turn off display alter mode */
+ out_be64(ioc->regs + (P5IOC2_CTL | P5IOC2_REG_AND),
+ 0xffffff7fffffffff);
+
+ /* setup hub and clustering interrupts BUIDs to 1 and 2 */
+ out_be64(ioc->regs + P5IOC2_SBUID, 0x0001000200000000);
+
+ /* setup old style MSI BUID (should be unused but set it up anyway) */
+ out_be32(ioc->regs + P5IOC2_BUCO, 0xf);
+
+ /* Set XIXO bit 0 needed for "enhanced" TCEs or else TCE
+ * fetches appear as normal memory reads on GX causing
+ * P7 to checkstop when a TCE DKill collides with them.
+ */
+ out_be64(ioc->regs + P5IOC2_XIXO, in_be64(ioc->regs + P5IOC2_XIXO)
+ | P5IOC2_XIXO_ENH_TCE);
+
+ /* Clear routing tables */
+ for (n = 0; n < 16; n++) {
+ for (p = 0; p < 8; p++)
+ out_be64(ioc->regs + P5IOC2_TxRTE(p,n), 0);
+ }
+ for (n = 0; n < 32; n++)
+ out_be64(ioc->regs + P5IOC2_BUIDRTE(n), 0);
+
+ /*
+ * Setup routing. We use the same setup that pHyp appears
+ * to do (after inspecting the various registers with SCOM)
+ *
+ * We assume the BARs are already setup by the FSP such
+ * that BAR0 is 128G (8G region size) and BAR6 is
+ * 256M (16M region size).
+ *
+	 * The routing is based on what pHyp and BML do: each Calgary
+	 * gets one slice of BAR6 and two slices of BAR0
+ */
+ /* BAR 0 segments 0 & 1 -> CA0 */
+ out_be64(ioc->regs + P5IOC2_TxRTE(0,0),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
+ out_be64(ioc->regs + P5IOC2_TxRTE(0,1),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
+
+ /* BAR 0 segments 2 & 3 -> CA1 */
+ out_be64(ioc->regs + P5IOC2_TxRTE(0,2),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
+ out_be64(ioc->regs + P5IOC2_TxRTE(0,3),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
+
+	/* BAR 6 segment 0 -> CA0 */
+ out_be64(ioc->regs + P5IOC2_TxRTE(6,0),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA0_RIO_ID);
+
+	/* BAR 6 segment 1 -> CA1 */
+ out_be64(ioc->regs + P5IOC2_TxRTE(6,1),
+ P5IOC2_TxRTE_VALID | P5IOC2_CA1_RIO_ID);
+
+ /*
+ * BUID routing, we send entries 1 to CA0 and 2 to CA1
+ * just like pHyp and make sure the base and mask are
+	 * both clear in SID so we route the whole 512 block
+ */
+ val = in_be64(ioc->regs + P5IOC2_SID);
+ val = SETFIELD(P5IOC2_SID_BUID_BASE, val, 0);
+ val = SETFIELD(P5IOC2_SID_BUID_MASK, val, 0);
+ out_be64(ioc->regs + P5IOC2_SID, val);
+ out_be64(ioc->regs + P5IOC2_BUIDRTE(1),
+ P5IOC2_BUIDRTE_VALID | P5IOC2_BUIDRTE_RR_RET |
+ P5IOC2_CA0_RIO_ID);
+ out_be64(ioc->regs + P5IOC2_BUIDRTE(2),
+ P5IOC2_BUIDRTE_VALID | P5IOC2_BUIDRTE_RR_RET |
+ P5IOC2_CA1_RIO_ID);
+}
+
+static void p5ioc2_ca_init(struct p5ioc2 *ioc, int ca)
+{
+ void *regs = ca ? ioc->ca1_regs : ioc->ca0_regs;
+ uint64_t val;
+
+ printf("P5IOC2: Initializing Calgary %d...\n", ca);
+
+ /* Setup device BUID */
+ val = SETFIELD(CA_DEVBUID, 0ul, ca ? P5IOC2_CA1_BUID : P5IOC2_CA0_BUID);
+ out_be32(regs + CA_DEVBUID, val);
+
+ /* Setup HubID in TARm (and keep TCE clear, Linux will init that)
+ *
+	 * BML and pHyp set the values to 1 for CA0 and 4 for CA1. We
+ * keep the TAR valid bit clear as well.
+ */
+ val = SETFIELD(CA_TAR_HUBID, 0ul, ca ? 4 : 1);
+ val = SETFIELD(CA_TAR_ALTHUBID, val, ca ? 4 : 1);
+ out_be64(regs + CA_TAR0, val);
+ out_be64(regs + CA_TAR1, val);
+ out_be64(regs + CA_TAR2, val);
+ out_be64(regs + CA_TAR3, val);
+
+ /* Bridge config register. We set it up to the same value as observed
+ * under pHyp on a Juno machine. The difference from the IPL value is
+ * that TCE buffers are enabled, discard timers are increased and
+ * we disable response status to avoid errors.
+ */
+ //out_be64(regs + CA_CCR, 0x5045DDDED2000000);
+ // disable memlimit:
+ out_be64(regs + CA_CCR, 0x5005DDDED2000000);
+
+ /* The system memory base/limit etc... setup will be done when the
+ * user enables TCE via OPAL calls
+ */
+}
+
+static void p5ioc2_create_hub(struct dt_node *np)
+{
+ struct p5ioc2 *ioc;
+ unsigned int i, id, irq;
+ char *path;
+
+ /* Use the BUID extension as ID and add it to device-tree */
+ id = dt_prop_get_u32(np, "ibm,buid-ext");
+ path = dt_get_path(np);
+ printf("P5IOC2: Found at %s ID 0x%x\n", path, id);
+ free(path);
+ dt_add_property_cells(np, "ibm,opal-hubid", 0, id);
+
+ /* Load VPD LID */
+ vpd_iohub_load(np);
+
+ ioc = zalloc(sizeof(struct p5ioc2));
+ if (!ioc)
+ return;
+ ioc->hub.hub_id = id;
+ ioc->hub.ops = &p5ioc2_hub_ops;
+ ioc->dt_node = np;
+
+ /* We assume SBAR == GX0 + some hard coded offset */
+ ioc->regs = (void *)dt_get_address(np, 0, NULL);
+
+ /* For debugging... */
+ for (i = 0; i < 8; i++)
+		printf("P5IOC2: BAR%d = 0x%016llx M=0x%016llx\n", i,
+ in_be64(ioc->regs + P5IOC2_BAR(i)),
+ in_be64(ioc->regs + P5IOC2_BARM(i)));
+
+ ioc->host_chip = dt_get_chip_id(np);
+
+ ioc->gx_bus = dt_prop_get_u32(np, "ibm,gx-index");
+
+ /* Rather than reading the BARs in P5IOC2, we "know" that
+ * BAR6 matches GX BAR 1 and BAR0 matches GX BAR 2. This
+ * is a bit fishy but will work for the few machines this
+ * is intended to work on
+ */
+ ioc->bar6 = dt_prop_get_u64(np, "ibm,gx-bar-1");
+ ioc->bar0 = dt_prop_get_u64(np, "ibm,gx-bar-2");
+
+ printf("DT BAR6 = 0x%016llx\n", ioc->bar6);
+ printf("DT BAR0 = 0x%016llx\n", ioc->bar0);
+
+ /* We setup the corresponding Calgary register bases and memory
+ * regions. Note: those cannot be used until the routing has
+ * been setup by inits
+ */
+ ioc->ca0_regs = (void *)ioc->bar6 + P5IOC2_CA0_REG_OFFSET;
+ ioc->ca1_regs = (void *)ioc->bar6 + P5IOC2_CA1_REG_OFFSET;
+ ioc->ca0_mm_region = ioc->bar0 + P5IOC2_CA0_MM_OFFSET;
+ ioc->ca1_mm_region = ioc->bar0 + P5IOC2_CA1_MM_OFFSET;
+
+ /* Base of our BUIDs, will be refined later */
+ ioc->buid_base = id << 9;
+
+ /* Add interrupts: XXX These are the hub interrupts, we should add the
+ * calgary ones as well... but we don't handle any of them currently
+ * anyway.
+ */
+ irq = (ioc->buid_base + 1) << 4;
+ dt_add_property_cells(np, "interrupts", irq, irq + 1);
+ dt_add_property_cells(np, "interrupt-base", irq);
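+	/* Interrupt numbers are (BUID << 4) | level, and the hub
+	 * interrupts sit on BUID buid_base + 1, hence the shift above.
+	 */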
+
+
+ /* Now, we do the bulk of the inits */
+ p5ioc2_inits(ioc);
+ p5ioc2_ca_init(ioc, 0);
+ p5ioc2_ca_init(ioc, 1);
+
+ /* So how do we know what PHBs to create ? Let's try all of them
+ * and we'll see if that causes problems. TODO: Use VPD !
+ */
+ for (i = 0; i < 4; i++)
+ p5ioc2_phb_setup(ioc, &ioc->ca0_phbs[i], 0, i, true,
+ ioc->buid_base + P5IOC2_CA0_BUID + i + 1);
+ for (i = 0; i < 4; i++)
+ p5ioc2_phb_setup(ioc, &ioc->ca1_phbs[i], 1, i, true,
+ ioc->buid_base + P5IOC2_CA1_BUID + i + 1);
+
+ /* Reset delay... synchronous, hope we never do that as a
+ * result of an OPAL callback. We shouldn't really need this
+ * here and may fold it in the generic slot init sequence but
+ * it's not like we care much about that p5ioc2 code...
+ *
+ * This is mostly to give devices a chance to settle after
+ * having lifted the reset pin on PCI-X.
+ */
+ time_wait_ms(1000);
+
+ printf("P5IOC2: Initialization complete\n");
+
+ cec_register(&ioc->hub);
+}
+
+void probe_p5ioc2(void)
+{
+ struct dt_node *np;
+
+ dt_for_each_compatible(dt_root, np, "ibm,p5ioc2")
+ p5ioc2_create_hub(np);
+}
+
diff --git a/hw/p7ioc-inits.c b/hw/p7ioc-inits.c
new file mode 100644
index 0000000..dc5c370
--- /dev/null
+++ b/hw/p7ioc-inits.c
@@ -0,0 +1,1096 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * These inits are in part auto-generated from tables coming
+ * from the HW guys, then hand updated
+ */
+#include <skiboot.h>
+#include <p7ioc.h>
+#include <p7ioc-regs.h>
+#include <io.h>
+#include <processor.h>
+#include <timebase.h>
+
+#undef DUMP_CI_ROUTING
+#undef DUMP_REG_WRITES
+
+#ifdef DUMP_REG_WRITES
+#define REGW(offset, value) do { \
+ out_be64(ioc->regs + (offset), (value)); \
+ printf(" REGW: %06lx=%016lx RB: %016llx\n", \
+ (unsigned long)(offset), \
+ (unsigned long)(value), \
+ in_be64(ioc->regs + (offset))); \
+ in_be64(ioc->regs + (offset)); \
+ } while(0)
+#else
+#define REGW(offset, value) do { \
+ out_be64(ioc->regs + (offset), (value)); \
+ in_be64(ioc->regs + (offset)); \
+ } while(0)
+#endif
+#define REGR(offset) in_be64(ioc->regs + (offset))
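+/* Note the read-back following each write: this appears intended to
+ * push the store out to the device before the next init step.
+ */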
+
+static void p7ioc_init_BI(struct p7ioc *ioc)
+{
+ printf("P7IOC: Init BI...\n");
+
+ /*** General settings ***/
+
+ /* Init_1 and Init_2: Different between P7 and P7+ */
+ if (PVR_TYPE(mfspr(SPR_PVR)) == PVR_TYPE_P7P) {
+ printf("P7IOC: -> Configured for P7+\n");
+
+ /* Chicken switches */
+ REGW(0x3c00d8, 0x0004000000000600);
+ /* GX config */
+ REGW(0x3c00a0, 0x9F8929BE00880085);
+ } else {
+ printf("P7IOC: -> Configured for P7\n");
+
+ /* P7 setting assumes "early write done" mode is
+ * enabled in the GX controller. It seems to be
+ * the case but maybe we want to check/set it via
+ * xscom ?
+ */
+ /* Chicken switches */
+ REGW(0x3c00d8, 0x00040000000004C0);
+ /* GX config */
+ REGW(0x3c00a0, 0x9C8929BE00880085);
+ }
+
+ /*
+ * Note: While running skiboot on Firebird-L, I have
+ * to print something or wait for a while. The root
+	 * cause hasn't been identified yet.
+ */
+ time_wait_ms(100);
+
+ /* Init_3: Upbound Credit Config */
+ REGW(0x3c00c8, 0x0303060403030000);
+ /* Init_4: Credit Init Timer */
+ REGW(0x3c00e8, 0x00000000000000FF);
+
+ /* Init_4.1: BI Ack Timing */
+ REGW(0x3c00e8, 0x0000FC0000000000);
+ /* Init_5: Ordering Override 0*/
+ REGW(0x3c0200, 0x0000000000000000);
+ /* Init_6: Ordering Override 1*/
+ REGW(0x3c0208, 0x0000000000000000);
+
+ /*** Downbound TTYPE table ***/
+
+ /* Init_7: Enable sequence / speculation for CI Loads */
+ REGW(0x3c00a8, 0x0000000000000004);
+ /* Init_8: */
+ REGW(0x3c00b0, 0x700800C000000000);
+ /* Init_9: Enable sequence / speculation for CI Stores */
+ REGW(0x3c00a8, 0x0000000000000005);
+ /* Init_10: */
+ REGW(0x3c00b0, 0x704820C000000000);
+ /* Init_11: Enable speculation for EOI */
+ REGW(0x3c00a8, 0x000000000000001B);
+ /* Init_12: */
+ REGW(0x3c00b0, 0x3590204000000000);
+	/* Init_13: Enable speculation for DMA Rd Responses */
+ REGW(0x3c00a8, 0x0000000000000020);
+ /* Init_14: */
+ REGW(0x3c00b0, 0x1103C4C000000000);
+ /* Init_15: Enable sequence for DMA RWNITC */
+ REGW(0x3c00a8, 0x0000000000000001);
+ /* Init_16: */
+ REGW(0x3c00b0, 0xC000000000000000);
+ /* Init_17: Enable sequence for IOKill */
+ REGW(0x3c00a8, 0x0000000000000009);
+ /* Init_18: */
+ REGW(0x3c00b0, 0x4208210000000000);
+ /* Init_19: Enable sequence for IOKill */
+ REGW(0x3c00a8, 0x000000000000000A);
+ /* Init_20: */
+ REGW(0x3c00b0, 0x4200210000000000);
+ /* Init_21: Enable sequence for FMTC CI Store w/Kill */
+ REGW(0x3c00a8, 0x0000000000000021);
+
+ /*** Timer controls ***/
+
+ /* Init_22: */
+ REGW(0x3c00b0, 0x4200300000000000);
+ /* Init_23: Dnbound timer mask */
+ REGW(0x3c0190, 0x0040000000000000);
+ /* Init_24: Upbound timer mask 0 */
+ REGW(0x3c0180, 0x0010001000100010);
+ /* Init_25: Upbound timer mask 1 */
+ REGW(0x3c0188, 0x0010000000000000);
+ /* Init_26: Credit sync check config */
+ REGW(0x3c00f0, 0xC102000000000000);
+
+ /*** Setup trace ***/
+
+ /* Init_27: DBG stop trace */
+ REGW(0x3c0410, 0x4000000000000000);
+ /* Init_28: DBG control */
+ REGW(0x3c0400, 0x0000000000000000);
+ /* Init_29: DBG Mode */
+ REGW(0x3c0408, 0xA0000000F0CC3300);
+ /* Init_29a: DBG C0 (Stop on Error) */
+ REGW(0x3c0418, 0xF4F00FFF00000000);
+ /* Init_30: DBG pre-mux select */
+ REGW(0x3c0478, 0x0023000000000000);
+ /* Init_31: CA0 mode */
+ REGW(0x3c04b0, 0x8000000000000000);
+ /* Init_32: CA0 Compression 0 */
+ REGW(0x3c04b8, 0x0000000000000000);
+ /* Init_33: CA0 Compression 1 */
+ REGW(0x3c04c0, 0x0000000000000000);
+ /* Init_34: CA0 Pattern A march (cmd1 selected val) */
+	/* Init_34: CA0 Pattern A match (cmd1 selected val) */
+ /* Init_35: CA0 Trigger 0 definition (pattern A) */
+ REGW(0x3c04a0, 0x8000000000000000);
+ /* Init_36: CA1 mode */
+ REGW(0x3c0530, 0x8000000000000000);
+ /* Init_37: CA1 Compression 0 */
+ REGW(0x3c0538, 0x0000000000000000);
+ /* Init_38: CA1 Compression 1 */
+ REGW(0x3c0540, 0x0000000000000000);
+ /* Init_39: CA2 mode */
+ REGW(0x3c05b0, 0x8000000000000000);
+ /* Init_40: CA2 Compression 0 */
+ REGW(0x3c05b8, 0x0000000000000000);
+ /* Init_41: CA2 Compression 1 */
+ REGW(0x3c05c0, 0x0000000000000000);
+ /* Init_42: CA3 Mode */
+ REGW(0x3c0630, 0x8000000000000000);
+ /* Init_43: CA3 Compression 0 */
+ REGW(0x3c0638, 0x0000000000000000);
+ /* Init_44: CA3 Compression 1 */
+ REGW(0x3c0640, 0x0000000000000000);
+ /* Init_45: CA3 Pattern A match (AIB val) */
+ REGW(0x3c0600, 0x80000100FFFEFF00);
+ /* Init_46: CA3 Trigger 0 definition (pattern A) */
+ REGW(0x3c0620, 0x8000000000000000);
+ /* Init_47: DBG unfreeze trace */
+ REGW(0x3c0410, 0x1000000000000000);
+ /* Init_48: DBG start trace */
+ REGW(0x3c0410, 0x8000000000000000);
+
+ /*** AIB Port Config ***/
+
+ /* Init_49: AIB Port Information */
+ REGW(0x3c00d0, 0x0888888800000000);
+ /* Init_50: Port Ordering controls */
+ REGW(0x3c0200, 0x0000000000000000);
+
+ /*** LEMs (need to match recov. tables) ***/
+
+ /* Init_51: Clear upbound LEM */
+ REGW(0x3c0000, 0x0000000000000000);
+ /* Init_52: Clear upbound WOF */
+ REGW(0x3c0040, 0x0000000000000000);
+ /* Init_53: Clear Dnbound LEM */
+ REGW(0x3c0050, 0x0000000000000000);
+ /* Init_54: Clear Dnbound WOF */
+ REGW(0x3c0090, 0x0000000000000000);
+ /* Init_55: Clear Fences */
+ REGW(0x3c0130, 0x0000000000000000);
+ /* Init_56: Clear Erpt latches */
+ REGW(0x3c0148, 0x0080000000000000);
+ /* Init_57: Set Upbound LEM Action0 */
+ REGW(0x3c0030, 0x0800000000800000);
+	/* Init_58: Set Upbound LEM Action1 */
+ REGW(0x3c0038, 0x0000000000000000);
+ /* Init_59: Set Upbound LEM Mask (AND write) */
+ REGW(0x3c0020, 0x0800000000000000);
+ /* Init_60: Set Dnbound LEM Action0 */
+ REGW(0x3c0080, 0x2000080CA07FFF40);
+ /* Init_61: Set Dnbound LEM Action1 */
+ REGW(0x3c0088, 0x0000000000000000);
+ /* Init_62: Set Dnbound LEM Mask (AND write) */
+ REGW(0x3c0070, 0x00000800200FFE00);
+
+ /*** Setup Fences (need to match recov. tables) ***/
+
+ /* Init_63: Set Upbound Damage Control 0 (GX Err) */
+ REGW(0x3c0100, 0xF7FFFFFFFF7FFFFF);
+ /* Init_64: Set Upbound Damage Control 1 (AIB Fence) */
+ REGW(0x3c0108, 0xF7FFFFFFFF7FFFFF);
+ /* Init_65: Set Upbound Damage Control 2 (Drop Pkt) */
+ REGW(0x3c0110, 0x0010054000000000);
+ /* Init_66: Set Dnbound Damage Control 0 (GX Err) */
+ REGW(0x3c0118, 0xDFFFF7F35F8000BF);
+ /* Init_67: Set Dnbound Damage Control 1 (AIB Fence) */
+ REGW(0x3c0120, 0xDFFFF7F35F8000BF);
+ /* Init_68: Set Dnbound Damage Control 2 (Drop Pkt) */
+ REGW(0x3c0128, 0x0000000C00000000);
+}
+
+static void p7ioc_init_MISC_HSS(struct p7ioc *ioc)
+{
+ unsigned int i, regbase;
+
+ printf("P7IOC: Init HSS...\n");
+
+ /* Note: These values might need to be tweaked per system and
+ * per physical port depending on electrical characteristics.
+ *
+ * For now we stick to the defaults provided by the spec.
+ */
+ for (i = 0; i < P7IOC_NUM_PHBS; i++) {
+ regbase = P7IOC_HSS_BASE + i * P7IOC_HSS_STRIDE;
+
+ if (!p7ioc_phb_enabled(ioc, i))
+ continue;
+
+ /* Init_1: HSSn CTL2 */
+ REGW(regbase + P7IOC_HSSn_CTL2_OFFSET, 0xFFFF6DB6DB000000);
+ /* Init_2: HSSn CTL3 */
+ REGW(regbase + P7IOC_HSSn_CTL3_OFFSET, 0x1130000320000000);
+ /* Init_3: HSSn CTL8 */
+ REGW(regbase + P7IOC_HSSn_CTL8_OFFSET, 0xDDDDDDDD00000000);
+
+#if 0 /* All these remain set to the values configured by the FSP */
+ /* Init_4: HSSn CTL9 */
+ REGW(regbase + P7IOC_HSSn_CTL9_OFFSET, 0x9999999900000000);
+ /* Init_5: HSSn CTL10 */
+ REGW(regbase + P7IOC_HSSn_CTL10_OFFSET, 0x8888888800000000);
+ /* Init_6: HSSn CTL11 */
+ REGW(regbase + P7IOC_HSSn_CTL11_OFFSET, 0x4444444400000000);
+ /* Init_7: HSSn CTL12 */
+ REGW(regbase + P7IOC_HSSn_CTL12_OFFSET, 0x3333333300000000);
+ /* Init_8: HSSn CTL13 */
+ REGW(regbase + P7IOC_HSSn_CTL13_OFFSET, 0x2222222200000000);
+ /* Init_9: HSSn CTL14 */
+ REGW(regbase + P7IOC_HSSn_CTL14_OFFSET, 0x1111111100000000);
+ /* Init_10: HSSn CTL15 */
+ REGW(regbase + P7IOC_HSSn_CTL15_OFFSET, 0x1111111100000000);
+ /* Init_11: HSSn CTL16 */
+ REGW(regbase + P7IOC_HSSn_CTL16_OFFSET, 0x9999999900000000);
+ /* Init_12: HSSn CTL17 */
+ REGW(regbase + P7IOC_HSSn_CTL17_OFFSET, 0x8888888800000000);
+ /* Init_13: HSSn CTL18 */
+ REGW(regbase + P7IOC_HSSn_CTL18_OFFSET, 0xDDDDDDDD00000000);
+ /* Init_14: HSSn CTL19 */
+ REGW(regbase + P7IOC_HSSn_CTL19_OFFSET, 0xCCCCCCCC00000000);
+ /* Init_15: HSSn CTL20 */
+ REGW(regbase + P7IOC_HSSn_CTL20_OFFSET, 0xBBBBBBBB00000000);
+ /* Init_16: HSSn CTL21 */
+ REGW(regbase + P7IOC_HSSn_CTL21_OFFSET, 0x9999999900000000);
+ /* Init_17: HSSn CTL22 */
+ REGW(regbase + P7IOC_HSSn_CTL22_OFFSET, 0x8888888800000000);
+ /* Init_18: HSSn CTL23 */
+ REGW(regbase + P7IOC_HSSn_CTL23_OFFSET, 0x7777777700000000);
+#endif
+ }
+}
+
+static void p7ioc_init_RGC(struct p7ioc *ioc)
+{
+ unsigned int i;
+ uint64_t val, cfg;
+
+ printf("P7IOC: Init RGC...\n");
+
+ /*** Clear ERPT Macros ***/
+
+ /* Init_1: RGC Configuration reg */
+ cfg = REGR(0x3e1c08);
+ REGW(0x3e1c08, cfg | PPC_BIT(1));
+ time_wait_ms(1);
+
+ /* Init_2: RGC Configuration reg */
+ REGW(0x3e1c08, cfg);
+
+	/*** Set LEM regs (needs to match recov. code) ***/
+
+ /* Init_3: LEM FIR Accumulator */
+ REGW(0x3e1e00, 0x0000000000000000);
+ /* Init_4: LEM Action 0 */
+ REGW(0x3e1e30, 0x0FFF791F0B030000);
+	/* Init_5: LEM Action 1 */
+ REGW(0x3e1e38, 0x0000000000000000);
+ /* Init_6: LEM WOF */
+ REGW(0x3e1e40, 0x0000000000000000);
+ /* Init_7: LEM Mask Reg (AND write) */
+ REGW(0x3e1e20, 0x0FFF001F03030000);
+
+ /*** Set GEM regs (masks still on, no irpts can occur yet) ***/
+
+ /* Init_8: GEM XFIR */
+ REGW(0x3e0008, 0x0000000000000000);
+ /* Init_9: GEM WOF */
+ REGW(0x3e0028, 0x0000000000000000);
+
+ /*** Set Damage Controls (needs to match recov.) ***/
+
+ /* Init_10: LDCP */
+ REGW(0x3e1c18, 0xF00086C0B4FCFFFF);
+
+ /*** Read status (optional) ***/
+
+ /* Init_11: Read status */
+ val = REGR(0x3e1c10);
+ printf("P7IOC: Init_11 Status: %016llx\n", val);
+
+	/*** Set running configuration ***/
+
+ /* Init_12: Configuration reg (modes, values, timers) */
+ REGW(0x3e1c08, 0x10000077CE100000);
+
+ /* Init_13: Cmd/Dat Crd Allocation */
+ REGW(0x3e1c20, 0x00000103000700FF);
+ /* Init_14: GP reg - disable errs, wrap, stop_trc */
+ REGW(0x3e1018, 0x0000000000000000);
+ /* Init_15: Configuration reg (start init timers) */
+ cfg = REGR(0x3e1c08);
+ REGW(0x3e1c08, cfg | 0x00003f0000000000);
+
+ /*** Setup interrupts ***/
+
+ /* Init_16: BUID Register
+ *
+ * XXX NOTE: This needs to be clarified. According to the doc
+ * the register contains a 9-bit BUID, which makes sense so far.
+ *
+	 * However, the initialization sequence says "depends on which
+	 * GX bus", which doesn't make sense since AFAIK the GX bus
+	 * number is encoded in the BUID Extension bit which sits right
+	 * *above* the 9-bit BUID in the interrupt message.
+ *
+ * So I must be missing something here... For now I'll just
+ * write my 9-bit BUID and we'll see what happens.
+ *
+ */
+ REGW(0x3e1800, (uint64_t)ioc->rgc_buid << PPC_BITLSHIFT(31));
+
+ /* Init_17: Supposed to lock the IODA table but we aren't racing
+ * with anybody so there is little point.
+ *
+ * Note: If/when we support some kind of error recovery that
+ * involves re-initializing the IOC, then we might have
+ * to take some locks but it's assumed that the necessary
+ * lock(s) will be obtained by the caller.
+ */
+	//REGR(0x3e1840);
+
+ /* Init_18: IODA Table Addr: Select IST*/
+ REGW(0x3e1820, 0x8001000000000000);
+ /* Init_19: IODA Table Data: IRPT 0 */
+ REGW(0x3e1830, 0x0000000000000000);
+ /* Init_20: IODA Table Data: IRPT 1 */
+ REGW(0x3e1830, 0x0000000000000000);
+ /* Init_21: IODA Table Addr: Select HRT */
+ REGW(0x3e1820, 0x8000000000000000);
+ /* Init_22: IODA Table Data: HRT
+ *
+ * XXX Figure out what this actually is and what value should
+ * we use. For now, do like BML and use 0
+ */
+ for (i = 0; i < 4; i++)
+ REGW(0x3e1830, 0x0000000000000000);
+
+ /* Init_23: IODA Table Addr: select XIVT */
+ REGW(0x3e1820, 0x8002000000000000);
+ /* Init_24: IODA Table Data: Mask all interrupts */
+ for (i = 0; i < 16; i++)
+ REGW(0x3e1830, 0x000000ff00000000);
+
+ /* Init_25: Clear table lock if any was stale */
+ REGW(0x3e1840, 0x0000000000000000);
+
+ /* Init_32..37: Set the PHB AIB addresses. We configure those
+ * to the values recommended in the p7IOC doc.
+ *
+ * XXX NOTE: I cannot find a documentation for these, I assume
+ * they just take the full 64-bit address, but we may want to
+ * dbl check just in case (it seems to be what BML does but
+ * I'm good at mis-reading Milton's Perl).
+ */
+ for (i = 0; i < P7IOC_NUM_PHBS; i++) {
+ if (!p7ioc_phb_enabled(ioc, i))
+ continue;
+ REGW(0x3e1080 + (i << 3),
+ ioc->mmio1_win_start + PHBn_AIB_BASE(i));
+ }
+}
+
+static void p7ioc_init_ci_routing(struct p7ioc *ioc)
+{
+ unsigned int i, j = 0;
+ uint64_t rmatch[47];
+ uint64_t rmask[47];
+ uint64_t pmask;
+
+ /* Init_130: clear all matches (except 47 which routes to the RGC) */
+ for (i = 0; i < 47; i++) {
+ rmatch[i] = REGR(P7IOC_CI_RMATC_REG(i)) &
+ ~(P7IOC_CI_RMATC_ADDR_VALID |
+ P7IOC_CI_RMATC_BUID_VALID |
+ P7IOC_CI_RMATC_TYPE_VALID);
+ rmask[i] = 0;
+ REGW(P7IOC_CI_RMATC_REG(i), rmatch[i]);
+ }
+
+ /* Init_131...224: configure routing for everything except RGC
+ *
+ * We are using a slightly different routing setup than the
+ * example to make the code easier. We configure all PHB
+ * routing entries by doing all of PHB0 first, then all of PHB1
+ * etc...
+ *
+ * Then we append everything else except the RGC itself which
+ * remains hard wired at entry 47. So the unused entries live
+ * at 39..46.
+ *
+ * - 0 : PHB0 LSI BUID
+ * - 1 : PHB0 MSI BUID
+ * - 2 : PHB0 AIB Registers
+ * - 3 : PHB0 IO Space
+ * - 4 : PHB0 M32 Space
+ * - 5 : PHB0 M64 Space
+ * - 6..11 : PHB1
+ * - 12..17 : PHB2
+ * - 18..23 : PHB3
+ * - 24..29 : PHB4
+ * - 30..35 : PHB5
+ * - 36 : Invalidates broadcast (FMTC)
+ * - 37 : Interrupt response for RGC
+ * - 38 : RGC GEM BUID
+ * - 39..46 : Unused (alternate M64 ?)
+ * - 47 : RGC ASB Registers (catch all)
+ */
+
+ /* Helper macro to set a rule */
+#define CI_ADD_RULE(p, k, d, m) do { \
+ rmask[j] = P7IOC_CI_RMATC_ENCODE_##k(m); \
+ rmatch[j]= P7IOC_CI_RMATC_PORT(p) | \
+ P7IOC_CI_RMATC_##k##_VALID | \
+ P7IOC_CI_RMATC_ENCODE_##k(d); \
+ j++; \
+ } while (0)
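+	/* For instance, CI_ADD_RULE(port, BUID, base, 0x1ff) fills entry
+	 * j so that a transaction whose 9 BUID bits all match 'base' is
+	 * routed to 'port', then advances j to the next entry.
+	 */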
+
+ pmask = 0;
+ for (i = 0; i < P7IOC_NUM_PHBS; i++) {
+ unsigned int buid_base = ioc->buid_base + PHBn_BUID_BASE(i);
+
+ if (!p7ioc_phb_enabled(ioc, i))
+ continue;
+
+ /* LSI BUIDs, match all 9 bits (1 BUID per PHB) */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), BUID,
+ buid_base + PHB_BUID_LSI_OFFSET, 0x1ff);
+
+ /* MSI BUIDs, match 4 bits (16 BUIDs per PHB) */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), BUID,
+ buid_base + PHB_BUID_MSI_OFFSET, 0x1f0);
+
+ /* AIB reg space */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), ADDR,
+ ioc->mmio1_win_start + PHBn_AIB_BASE(i),
+ ~(PHBn_AIB_SIZE - 1));
+
+ /* IO space */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), ADDR,
+ ioc->mmio1_win_start + PHBn_IO_BASE(i),
+ ~(PHB_IO_SIZE - 1));
+
+ /* M32 space */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), ADDR,
+ ioc->mmio2_win_start + PHBn_M32_BASE(i),
+ ~(PHB_M32_SIZE - 1));
+
+ /* M64 space */
+ CI_ADD_RULE(P7IOC_CI_PHB_PORT(i), ADDR,
+ ioc->mmio2_win_start + PHBn_M64_BASE(i),
+ ~(PHB_M64_SIZE - 1));
+
+ /* For use with invalidate bcasts */
+ pmask |= P7IOC_CI_PHB_PORT(i);
+ }
+
+ /* Invalidates broadcast to all PHBs */
+ CI_ADD_RULE(pmask, TYPE, 0x80, 0xf0);
+
+ /* Interrupt responses go to RGC */
+ CI_ADD_RULE(P7IOC_CI_RGC_PORT, TYPE, 0x60, 0xf0);
+
+ /* RGC GEM BUID (1 BUID) */
+ CI_ADD_RULE(P7IOC_CI_RGC_PORT, BUID, ioc->rgc_buid, 0x1ff);
+
+	/* Program the mask values first, then the match values */
+ for (i = 0; i < 47; i++)
+ REGW(P7IOC_CI_RMASK_REG(i), rmask[i]);
+ for (i = 0; i < 47; i++)
+ REGW(P7IOC_CI_RMATC_REG(i), rmatch[i]);
+
+ /* Init_225: CI Match 47 (Configure RGC catch all) */
+ REGW(P7IOC_CI_RMASK_REG(47), 0x0000000000000000);
+ REGW(P7IOC_CI_RMATC_REG(47), 0x4000800000000000);
+
+#ifdef DUMP_CI_ROUTING
+ printf("P7IOC: CI Routing table:\n");
+ for (i = 0; i < 48; i++)
+ printf(" [%.2d] MTCH: %016llx MSK: %016llx\n", i,
+ REGR(P7IOC_CI_RMATC_REG(i)),
+ REGR(P7IOC_CI_RMASK_REG(i)));
+#endif /* DUMP_CI_ROUTING */
+}
+
+static void p7ioc_init_CI(struct p7ioc *ioc)
+{
+ printf("P7IOC: Init CI...\n");
+
+ /*** Clear ERPT macros ***/
+
+ /* XXX NOTE: The doc seems to also provide "alternate freq ratio"
+ * settings. Not sure what they are about, let's stick to the
+ * original values for now.
+ */
+
+ /* Init_1: CI Port 0 Configuration */
+ REGW(0x3d0000, 0x420000C0073F0002);
+ /* Init_2: CI Port 0 Configuration */
+ REGW(0x3d0000, 0x020000C0073F0002);
+ /* Init_3: CI Port 1 Configuration */
+ REGW(0x3d1000, 0x42000FCF07200002);
+ /* Init_4: CI Port 1 Configuration */
+ REGW(0x3d1000, 0x02000FCF07200002);
+ /* Init_5: CI Port 2 Configuration */
+ REGW(0x3d2000, 0x420000C307200002);
+ /* Init_6: CI Port 2 Configuration */
+ REGW(0x3d2000, 0x020000C307200002);
+ /* Init_7: CI Port 3 Configuration */
+ REGW(0x3d3000, 0x420000C307200002);
+ /* Init_8: CI Port 3 Configuration */
+ REGW(0x3d3000, 0x020000C307200002);
+ /* Init_9: CI Port 4 Configuration */
+ REGW(0x3d4000, 0x420000C307200002);
+ /* Init_10: CI Port 4 Configuration */
+ REGW(0x3d4000, 0x020000C307200002);
+ /* Init_11: CI Port 5 Configuration */
+ REGW(0x3d5000, 0x420000C307200002);
+ /* Init_12: CI Port 5 Configuration */
+ REGW(0x3d5000, 0x020000C307200002);
+ /* Init_13: CI Port 6 Configuration */
+ REGW(0x3d6000, 0x420000C307200002);
+ /* Init_14: CI Port 6 Configuration */
+ REGW(0x3d6000, 0x020000C307200002);
+ /* Init_15: CI Port 7 Configuration */
+ REGW(0x3d7000, 0x420000C307200002);
+ /* Init_16: CI Port 7 Configuration */
+ REGW(0x3d7000, 0x020000C307200002);
+
+ /*** Set LEM regs (need to match recov.) ***/
+
+ /* Init_17: CI Port 0 LEM FIR Accumulator */
+ REGW(0x3d0200, 0x0000000000000000);
+ /* Init_18: CI Port 0 LEM Action 0 */
+ REGW(0x3d0230, 0x0A00000000000000);
+ /* Init_19: CI Port 0 LEM Action 1 */
+ REGW(0x3d0238, 0x0000000000000000);
+ /* Init_20: CI Port 0 LEM WOF */
+ REGW(0x3d0240, 0x0000000000000000);
+ /* Init_21: CI Port 0 LEM Mask (AND write) */
+ REGW(0x3d0220, 0x0200000000000000);
+	/* Init_22: CI Port 1 LEM FIR Accumulator */
+ REGW(0x3d1200, 0x0000000000000000);
+ /* Init_23: CI Port 1 LEM Action 0 */
+ REGW(0x3d1230, 0x0000000000000000);
+ /* Init_24: CI Port 1 LEM Action 1 */
+ REGW(0x3d1238, 0x0000000000000000);
+ /* Init_25: CI Port 1 LEM WOF */
+ REGW(0x3d1240, 0x0000000000000000);
+ /* Init_26: CI Port 1 LEM Mask (AND write) */
+ REGW(0x3d1220, 0x0000000000000000);
+ /* Init_27: CI Port 2 LEM FIR Accumulator */
+ REGW(0x3d2200, 0x0000000000000000);
+ /* Init_28: CI Port 2 LEM Action 0 */
+ REGW(0x3d2230, 0xA4F4000000000000);
+ /* Init_29: CI Port 2 LEM Action 1 */
+ REGW(0x3d2238, 0x0000000000000000);
+ /* Init_30: CI Port 2 LEM WOF */
+ REGW(0x3d2240, 0x0000000000000000);
+ /* Init_31: CI Port 2 LEM Mask (AND write) */
+ REGW(0x3d2220, 0x0000000000000000);
+ /* Init_32: CI Port 3 LEM FIR Accumulator */
+ REGW(0x3d3200, 0x0000000000000000);
+ /* Init_33: CI Port 3 LEM Action 0 */
+ REGW(0x3d3230, 0xA4F4000000000000);
+ /* Init_34: CI Port 3 LEM Action 1 */
+ REGW(0x3d3238, 0x0000000000000000);
+ /* Init_35: CI Port 3 LEM WOF */
+ REGW(0x3d3240, 0x0000000000000000);
+ /* Init_36: CI Port 3 LEM Mask (AND write) */
+ REGW(0x3d3220, 0x0000000000000000);
+ /* Init_37: CI Port 4 LEM FIR Accumulator */
+ REGW(0x3d4200, 0x0000000000000000);
+ /* Init_38: CI Port 4 Action 0 */
+ REGW(0x3d4230, 0xA4F4000000000000);
+ /* Init_39: CI Port 4 Action 1 */
+ REGW(0x3d4238, 0x0000000000000000);
+ /* Init_40: CI Port 4 WOF */
+ REGW(0x3d4240, 0x0000000000000000);
+ /* Init_41: CI Port 4 Mask (AND write) */
+ REGW(0x3d4220, 0x0000000000000000);
+ /* Init_42: CI Port 5 LEM FIR Accumulator */
+ REGW(0x3d5200, 0x0000000000000000);
+ /* Init_43: CI Port 5 Action 0 */
+ REGW(0x3d5230, 0xA4F4000000000000);
+ /* Init_44: CI Port 5 Action 1 */
+ REGW(0x3d5238, 0x0000000000000000);
+	/* Init_45: CI Port 5 WOF */
+ REGW(0x3d5240, 0x0000000000000000);
+ /* Init_46: CI Port 5 Mask (AND write) */
+ REGW(0x3d5220, 0x0000000000000000);
+ /* Init_47: CI Port 6 LEM FIR Accumulator */
+ REGW(0x3d6200, 0x0000000000000000);
+ /* Init_48: CI Port 6 Action 0 */
+ REGW(0x3d6230, 0xA4F4000000000000);
+ /* Init_49: CI Port 6 Action 1 */
+ REGW(0x3d6238, 0x0000000000000000);
+ /* Init_50: CI Port 6 WOF */
+ REGW(0x3d6240, 0x0000000000000000);
+ /* Init_51: CI Port 6 Mask (AND write) */
+ REGW(0x3d6220, 0x0000000000000000);
+ /* Init_52: CI Port 7 LEM FIR Accumulator */
+ REGW(0x3d7200, 0x0000000000000000);
+ /* Init_53: CI Port 7 Action 0 */
+ REGW(0x3d7230, 0xA4F4000000000000);
+ /* Init_54: CI Port 7 Action 1 */
+ REGW(0x3d7238, 0x0000000000000000);
+ /* Init_55: CI Port 7 WOF */
+ REGW(0x3d7240, 0x0000000000000000);
+ /* Init_56: CI Port 7 Mask (AND write) */
+ REGW(0x3d7220, 0x0000000000000000);
+
+ /*** Set Damage Controls (need match recov.) ***/
+
+ /* Init_57: CI Port 0 LDCP*/
+ REGW(0x3d0010, 0x421A0000000075FF);
+ /* Init_58: CI Port 1 LDCP */
+ REGW(0x3d1010, 0x421A000000007FFF);
+ /* Init_59: CI Port 2 LDCP */
+ REGW(0x3d2010, 0x421A24F400005B0B);
+ /* Init_60: CI Port 3 LDCP */
+ REGW(0x3d3010, 0x421A24F400005B0B);
+ /* Init_61: CI Port 4 LDCP */
+ REGW(0x3d4010, 0x421A24F400005B0B);
+ /* Init_62: CI Port 5 LDCP */
+ REGW(0x3d5010, 0x421A24F400005B0B);
+ /* Init_63: CI Port 6 LDCP */
+ REGW(0x3d6010, 0x421A24F400005B0B);
+ /* Init_64: CI Port 7 LDCP */
+ REGW(0x3d7010, 0x421A24F400005B0B);
+
+ /*** Setup Trace 0 ***/
+
+ /* Init_65: CI Trc 0 DBG - Run/Status (stop trace) */
+ REGW(0x3d0810, 0x5000000000000000);
+ /* Init_66: CI Trc 0 DBG - Mode (not cross trig CA's) */
+ REGW(0x3d0808, 0xB0000000F0000000);
+ /* Init_66a: CI Trc 0 DBG - C0 (stop on error) */
+ REGW(0x3d0818, 0xF4F00FFF00000000);
+ /* Init_67: CI Trc 0 DBG - Select (port 0 mode 2) */
+ REGW(0x3d0878, 0x0002000000000000);
+ /* Init_68: CI Trc 0 CA0 - Pattern A (RX cmd val) */
+ REGW(0x3d0880, 0xC0200000DFFFFF00);
+ /* Init_69: CI Trc 0 CA0 - Trigger 0 (Pattern A) */
+ REGW(0x3d08a0, 0x8000000000000000);
+ /* Init_70: CI Trc 0 - Mode */
+ REGW(0x3d08b0, 0x8000000000000000);
+ /* Init_71: CI Trc 0 CA1 - Pattern A (TX cmd val) */
+ REGW(0x3d0900, 0xC0200000DFFFFF00);
+ /* Init_72: CI Trc 0 CA1 - Trigger 0 (Pattern A) */
+ REGW(0x3d0920, 0x8000000000000000);
+ /* Init_73: CI Trc 0 CA1 - Mode */
+ REGW(0x3d0930, 0x8000000000000000);
+ /* Init_74: CI Trc 0 DBG - Run/Status (start trace) */
+ REGW(0x3d0810, 0x8000000000000000);
+
+ /*** Setup Trace 1 ***/
+
+ /* Init_75: CI Trc 1 DBG - Run/Status (stop trace) */
+ REGW(0x3d0c10, 0x5000000000000000);
+ /* Init_76: CI Trc 1 DBG - Mode (not cross trig CA's) */
+ REGW(0x3d0c08, 0xB0000000F0000000);
+ /* Init_76a: CI Trc 1 DBG - C0 (stop on error) */
+ REGW(0x3d0c18, 0xF4F00FFF00000000);
+ /* Init_77: CI Trc 1 DBG - Select (port 1 mode 2) */
+ REGW(0x3d0c78, 0x0102000000000000);
+ /* Init_78: CI Trc 1 CA0 - Pattern A (RX cmd val) */
+ REGW(0x3d0c80, 0xC0200000DFFFFF00);
+ /* Init_79: CI Trc 1 CA0 - Trigger 0 (Pattern A) */
+ REGW(0x3d0ca0, 0x8000000000000000);
+ /* Init_80: CI Trc 1 CA0 - Mode */
+ REGW(0x3d0cb0, 0x8000000000000000);
+ /* Init_81: CI Trc 1 CA1 - Pattern A (TX cmd val) */
+ REGW(0x3d0d00, 0xC0200000DFFFFF00);
+ /* Init_82: CI Trc 1 CA1 - Trigger 0 (Pattern A) */
+ REGW(0x3d0d20, 0x8000000000000000);
+ /* Init_83: CI Trc 1 CA1 - Mode */
+ REGW(0x3d0d30, 0x8000000000000000);
+ /* Init_84: CI Trc 1 DBG - Run/Status (start trace) */
+ REGW(0x3d0c10, 0x8000000000000000);
+
+ /* Init_85...92:
+ *
+ * XXX NOTE: Here we normally read the Port 0 to 7 status regs
+ * which is optional. Eventually we might want to do it to check
+ * if the status matches expectations
+ *
+ * (regs 0x3d0008 to 0x3d7008)
+ */
+
+ /*** Set buffer allocations (credits) ***/
+
+ /* Init_93: CI Port 0 Rx Cmd Buffer Allocation */
+ REGW(0x3d0050, 0x0808040400000000);
+ /* Init_94: CI Port 0 Rx Dat Buffer Allocation */
+ REGW(0x3d0060, 0x0006000200000000);
+ /* Init_95: CI Port 1 Tx Cmd Buffer Allocation */
+ REGW(0x3d1030, 0x0000040400000000);
+ /* Init_96: CI Port 1 Tx Dat Buffer Allocation */
+ REGW(0x3d1040, 0x0000004800000000);
+ /* Init_97: CI Port 1 Rx Cmd Buffer Allocation */
+ REGW(0x3d1050, 0x0008000000000000);
+ /* Init_98: CI Port 1 Rx Dat Buffer Allocation */
+ REGW(0x3d1060, 0x0048000000000000);
+ /* Init_99: CI Port 2 Tx Cmd Buffer Allocation */
+ REGW(0x3d2030, 0x0808080800000000);
+ /* Init_100: CI Port 2 Tx Dat Buffer Allocation */
+ REGW(0x3d2040, 0x0086008200000000);
+ /* Init_101: CI Port 2 Rx Cmd Buffer Allocation */
+ REGW(0x3d2050, 0x0808080800000000);
+ /* Init_102: CI Port 2 Rx Dat Buffer Allocation */
+ REGW(0x3d2060, 0x8648000000000000);
+ /* Init_103: CI Port 3 Tx Cmd Buffer Allocation */
+ REGW(0x3d3030, 0x0808080800000000);
+ /* Init_104: CI Port 3 Tx Dat Buffer Allocation */
+ REGW(0x3d3040, 0x0086008200000000);
+ /* Init_105: CI Port 3 Rx Cmd Buffer Allocation */
+ REGW(0x3d3050, 0x0808080800000000);
+ /* Init_106: CI Port 3 Rx Dat Buffer Allocation */
+ REGW(0x3d3060, 0x8648000000000000);
+ /* Init_107: CI Port 4 Tx Cmd Buffer Allocation */
+ REGW(0x3d4030, 0x0808080800000000);
+ /* Init_108: CI Port 4 Tx Dat Buffer Allocation */
+ REGW(0x3d4040, 0x0086008200000000);
+ /* Init_109: CI Port 4 Rx Cmd Buffer Allocation */
+ REGW(0x3d4050, 0x0808080800000000);
+ /* Init_110: CI Port 4 Rx Dat Buffer Allocation */
+ REGW(0x3d4060, 0x8648000000000000);
+ /* Init_111: CI Port 5 Tx Cmd Buffer Allocation */
+ REGW(0x3d5030, 0x0808080800000000);
+ /* Init_112: CI Port 5 Tx Dat Buffer Allocation */
+ REGW(0x3d5040, 0x0086008200000000);
+ /* Init_113: CI Port 5 Rx Cmd Buffer Allocation */
+ REGW(0x3d5050, 0x0808080800000000);
+ /* Init_114: CI Port 5 Rx Dat Buffer Allocation */
+ REGW(0x3d5060, 0x8648000000000000);
+ /* Init_115: CI Port 6 Tx Cmd Buffer Allocation */
+ REGW(0x3d6030, 0x0808080800000000);
+ /* Init_116: CI Port 6 Tx Dat Buffer Allocation */
+ REGW(0x3d6040, 0x0086008200000000);
+ /* Init_117: CI Port 6 Rx Cmd Buffer Allocation */
+ REGW(0x3d6050, 0x0808080800000000);
+ /* Init_118: CI Port 6 Rx Dat Buffer Allocation */
+ REGW(0x3d6060, 0x8648000000000000);
+ /* Init_119: CI Port 7 Tx Cmd Buffer Allocation */
+ REGW(0x3d7030, 0x0808080800000000);
+ /* Init_120: CI Port 7 Tx Dat Buffer Allocation */
+ REGW(0x3d7040, 0x0086008200000000);
+ /* Init_121: CI Port 7 Rx Cmd Buffer Allocation */
+ REGW(0x3d7050, 0x0808080800000000);
+	/* Init_122: CI Port 7 Rx Dat Buffer Allocation */
+ REGW(0x3d7060, 0x8648000000000000);
+
+ /*** Channel ordering ***/
+
+ /* Init_123: CI Port 1 Ordering */
+ REGW(0x3d1070, 0x73D0735E00000000);
+ /* Init_124: CI Port 2 Ordering */
+ REGW(0x3d2070, 0x73D0735E00000000);
+ /* Init_125: CI Port 3 Ordering */
+ REGW(0x3d3070, 0x73D0735E00000000);
+ /* Init_126: CI Port 4 Ordering */
+ REGW(0x3d4070, 0x73D0735E00000000);
+ /* Init_127: CI Port 5 Ordering */
+ REGW(0x3d5070, 0x73D0735E00000000);
+ /* Init_128: CI Port 6 Ordering */
+ REGW(0x3d6070, 0x73D0735E00000000);
+	/* Init_129: CI Port 7 Ordering */
+ REGW(0x3d7070, 0x73D0735E00000000);
+
+	/*** Setup routing (port 0 only) ***/
+
+ p7ioc_init_ci_routing(ioc);
+
+ /*** Set Running Configuration/Crd Init Timers ***
+ *
+ * XXX NOTE: Supposed to only modify bits 8:15
+ */
+
+ /* Init_226: CI Port 1 Configuration */
+ REGW(0x3d1000, 0x023F0FCF07200002);
+ /* Init_227: CI Port 2 Configuration */
+ REGW(0x3d2000, 0x023F00C307200002);
+ /* Init_228: CI Port 3 Configuration */
+ REGW(0x3d3000, 0x023F00C307200002);
+ /* Init_229: CI Port 4 Configuration */
+ REGW(0x3d4000, 0x023F00C307200002);
+ /* Init_230: CI Port 5 Configuration */
+ REGW(0x3d5000, 0x023F00C307200002);
+ /* Init_231: CI Port 6 Configuration */
+ REGW(0x3d6000, 0x023F00C307200002);
+ /* Init_232: CI Port 7 Configuration */
+ REGW(0x3d7000, 0x023F00C307200002);
+ /* Init_233: CI Port 0 Configuration */
+ REGW(0x3d0000, 0x023F00C0073F0002);
+}
+
+static void p7ioc_init_PHBs(struct p7ioc *ioc)
+{
+ unsigned int i;
+
+ printf("P7IOC: Init PHBs...\n");
+
+ /* We use the same reset sequence that we use for
+ * fast reboot for consistency
+ */
+ for (i = 0; i < P7IOC_NUM_PHBS; i++) {
+ if (p7ioc_phb_enabled(ioc, i))
+ p7ioc_phb_reset(&ioc->phbs[i].phb);
+ }
+}
+
+static void p7ioc_init_MISC(struct p7ioc *ioc)
+{
+ printf("P7IOC: Init MISC...\n");
+
+ /*** Set LEM regs ***/
+
+ /* Init_1: LEM FIR Accumulator */
+ REGW(0x3ea000, 0x0000000000000000);
+ /* Init_2: LEM Action 0 */
+ REGW(0x3ea030, 0xFFFFFFFCEE3FFFFF);
+ /* Init_3: LEM Action 1 */
+ REGW(0x3ea038, 0x0000000001C00000);
+ /* Init_4: LEM WOF */
+ REGW(0x3ea040, 0x0000000000000000);
+ /* Init_5: LEM Mask (AND write) */
+ REGW(0x3ea020, 0x000F03F0CD3FFFFF);
+ /* Init_5.1: I2C LEM FIR Accumulator */
+ REGW(0x3eb000, 0x0000000000000000);
+ /* Init_5.2: I2C LEM Action 0 */
+ REGW(0x3eb030, 0xEE00000000000000);
+ /* Init_5.3: I2C LEM Action 1 */
+ REGW(0x3eb038, 0x0000000000000000);
+ /* Init_5.4: I2C LEM WOF */
+ REGW(0x3eb040, 0x0000000000000000);
+ /* Init_5.5: I2C LEM Mask (AND write) */
+ REGW(0x3eb020, 0x4600000000000000);
+
+ /*** Set RGC GP bits (error enables) ***/
+
+ /* Init_7: RGC GP0 control (enable umux errors) */
+ REGW(0x3e1018, 0x8888880000000000);
+
+ /*** Central Trace Setup ***
+ *
+ * By default trace 4 PHBs Rx/Tx, but this can be changed
+ * for debugging purposes
+ */
+
+ /* Init_8: */
+ REGW(0x3ea810, 0x5000000000000000);
+ /* Init_9: */
+ REGW(0x3ea800, 0x0000000000000000);
+ /* Init_10: */
+ REGW(0x3ea808, 0xB0000000F0000000);
+ /* Init_11: */
+ REGW(0x3ea818, 0xF4F00FFF00000000);
+ /* Init_12: */
+ REGW(0x3ea820, 0x0000000000000000);
+ /* Init_13: */
+ REGW(0x3ea828, 0x0000000000000000);
+ /* Init_14: */
+ REGW(0x3ea830, 0x0000000000000000);
+ /* Init_15: */
+ REGW(0x3ea838, 0x0000000000000000);
+ /* Init_16: */
+ REGW(0x3ea840, 0x0000000000000000);
+ /* Init_17: */
+ REGW(0x3ea878, 0x0300000000000000);
+
+ /* Init_18: PHB0 mux select (Rx/Tx) */
+ REGW(0x000F80, 0x0000000000000000);
+ /* Init_19: PHB1 mux select (Rx/Tx) */
+ REGW(0x010F80, 0x0000000000000000);
+ /* Init_19.0: PHB2 mux select (Rx/Tx) */
+ REGW(0x020F80, 0x0000000000000000);
+ /* Init_19.1: PHB3 mux select (Rx/Tx) */
+ REGW(0x030F80, 0x0000000000000000);
+ /* Init_19.2: PHB4 mux select (Rx/Tx) */
+ REGW(0x040F80, 0x0000000000000000);
+ /* Init_19.3: PHB5 mux select (Rx/Tx) */
+ REGW(0x050F80, 0x0000000000000000);
+
+ /* Init_20: */
+ REGW(0x3ea880, 0x40008000FF7F0000);
+ /* Init_21: */
+ REGW(0x3ea888, 0x0000000000000000);
+ /* Init_22: */
+ REGW(0x3ea890, 0x0000000000000000);
+ /* Init_23: */
+ REGW(0x3ea898, 0x0000000000000000);
+ /* Init_24: */
+ REGW(0x3ea8a0, 0x8000000000000000);
+ /* Init_25: */
+ REGW(0x3ea8a8, 0x0000000000000000);
+ /* Init_26: */
+ REGW(0x3ea8b0, 0x8000000000000000);
+ /* Init_27: */
+ REGW(0x3ea8b8, 0x0000000000000000);
+ /* Init_28: */
+ REGW(0x3ea8c0, 0x0000000000000000);
+ /* Init_29: */
+ REGW(0x3ea900, 0x40008000FF7F0000);
+ /* Init_30: */
+ REGW(0x3ea908, 0x0000000000000000);
+ /* Init_31: */
+ REGW(0x3ea910, 0x0000000000000000);
+ /* Init_32: */
+ REGW(0x3ea918, 0x0000000000000000);
+ /* Init_33: */
+ REGW(0x3ea920, 0x8000000000000000);
+ /* Init_34: */
+ REGW(0x3ea928, 0x0000000000000000);
+ /* Init_35: */
+ REGW(0x3ea930, 0x8000000000000000);
+ /* Init_36: */
+ REGW(0x3ea938, 0x0000000000000000);
+ /* Init_37: */
+ REGW(0x3ea940, 0x0000000000000000);
+ /* Init_38: */
+ REGW(0x3ea980, 0x40008000FF7F0000);
+ /* Init_39: */
+ REGW(0x3ea988, 0x0000000000000000);
+ /* Init_40: */
+ REGW(0x3ea990, 0x0000000000000000);
+ /* Init_41: */
+ REGW(0x3ea998, 0x0000000000000000);
+ /* Init_42: */
+ REGW(0x3ea9a0, 0x8000000000000000);
+ /* Init_43: */
+ REGW(0x3ea9a8, 0x0000000000000000);
+ /* Init_44: */
+ REGW(0x3ea9b0, 0x8000000000000000);
+ /* Init_45: */
+ REGW(0x3ea9b8, 0x0000000000000000);
+ /* Init_46: */
+ REGW(0x3ea9c0, 0x0000000000000000);
+ /* Init_47: */
+ REGW(0x3eaa00, 0x40008000FF7F0000);
+ /* Init_48: */
+ REGW(0x3eaa08, 0x0000000000000000);
+ /* Init_49: */
+ REGW(0x3eaa10, 0x0000000000000000);
+ /* Init_50: */
+ REGW(0x3eaa18, 0x0000000000000000);
+ /* Init_51: */
+ REGW(0x3eaa20, 0x8000000000000000);
+ /* Init_52: */
+ REGW(0x3eaa28, 0x0000000000000000);
+ /* Init_53: */
+ REGW(0x3eaa30, 0x8000000000000000);
+ /* Init_54: */
+ REGW(0x3eaa38, 0x0000000000000000);
+ /* Init_55: */
+ REGW(0x3eaa40, 0x0000000000000000);
+ /* Init_56: */
+ REGW(0x3ea810, 0x1000000000000000);
+ /* Init_57: */
+ REGW(0x3ea810, 0x8000000000000000);
+
+	/*** I2C Master init fixup ***/
+
+ /* Init_58: I2C Master Operation Control */
+ REGW(0x3eb0a8, 0x8100000000000000);
+}
+
+static void p7ioc_init_GEM(struct p7ioc *ioc)
+{
+ printf("P7IOC: Init GEM...\n");
+
+	/*** Check for errors ***/
+
+ /* XXX TODO */
+#if 0
+ /* Init_1: */
+ REGR(0x3e0008, 0);
+ /* Init_2: */
+ REGR(0x3e0010, 0);
+ /* Init_3: */
+ REGR(0x3e0018, 0);
+#endif
+
+ /*** Get ready for new errors, allow interrupts ***
+ *
+	 * XXX: Need to leave all unused ports masked to prevent
+ * invalid errors
+ */
+
+ /* Init_4: GEM XFIR */
+ REGW(0x3e0008, 0x0000000000000000);
+ /* Init_5: GEM Mask (See FIXME) */
+ REGW(0x3e0020, 0x000F033FFFFFFFFF);
+ /* Init_6: GEM WOF */
+ REGW(0x3e0028, 0x0000000000000000);
+}
+
+int64_t p7ioc_inits(struct p7ioc *ioc)
+{
+ p7ioc_init_BI(ioc);
+ p7ioc_init_MISC_HSS(ioc);
+ p7ioc_init_RGC(ioc);
+ p7ioc_init_CI(ioc);
+ p7ioc_init_PHBs(ioc);
+ p7ioc_init_MISC(ioc);
+ p7ioc_init_GEM(ioc);
+
+	return OPAL_SUCCESS;
+}
+
+void p7ioc_reset(struct io_hub *hub)
+{
+ struct p7ioc *ioc = iohub_to_p7ioc(hub);
+ unsigned int i;
+
+ /* We could do a full cold reset of P7IOC but for now, let's
+ * not bother and just try to clean up the interrupts as best
+ * as possible
+ */
+
+ /* XXX TODO: RGC interrupts */
+
+ printf("P7IOC: Clearing IODA...\n");
+
+ /* First clear all IODA tables and wait a bit */
+ for (i = 0; i < 6; i++) {
+ if (p7ioc_phb_enabled(ioc, i))
+ p7ioc_phb_reset(&ioc->phbs[i].phb);
+ }
+}
diff --git a/hw/p7ioc-phb.c b/hw/p7ioc-phb.c
new file mode 100644
index 0000000..8dc7616
--- /dev/null
+++ b/hw/p7ioc-phb.c
@@ -0,0 +1,3206 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <p7ioc.h>
+#include <p7ioc-regs.h>
+#include <io.h>
+#include <timebase.h>
+#include <affinity.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <interrupts.h>
+#include <opal.h>
+#include <ccan/str/str.h>
+
+static void p7ioc_phb_trace(struct p7ioc_phb *p, FILE *s, const char *fmt, ...)
+__attribute__ ((format (printf, 3, 4)));
+
+static void p7ioc_phb_trace(struct p7ioc_phb *p, FILE *s, const char *fmt, ...)
+{
+ /* Use a temp stack buffer to print all at once to avoid
+ * mixups of a trace entry on SMP
+ */
+ char tbuf[128 + 10];
+ va_list args;
+ char *b = tbuf;
+
+ b += sprintf(b, "PHB%d: ", p->phb.opal_id);
+ va_start(args, fmt);
+ vsnprintf(b, 128, fmt, args);
+ va_end(args);
+ fputs(tbuf, s);
+}
+#define PHBDBG(p, fmt...) p7ioc_phb_trace(p, stdout, fmt)
+#define PHBERR(p, fmt...) p7ioc_phb_trace(p, stderr, fmt)
+
+/* Helper to select an IODA table entry */
+static inline void p7ioc_phb_ioda_sel(struct p7ioc_phb *p, uint32_t table,
+ uint32_t addr, bool autoinc)
+{
+ out_be64(p->regs + PHB_IODA_ADDR,
+ (autoinc ? PHB_IODA_AD_AUTOINC : 0) |
+ SETFIELD(PHB_IODA_AD_TSEL, 0ul, table) |
+ SETFIELD(PHB_IODA_AD_TADR, 0ul, addr));
+}
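+/* Callers then access the selected table entries through the IODA data
+ * register; with autoinc set, the entry address advances on each data
+ * access.
+ */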
+
+/* Helper to set the state machine timeout */
+static inline uint64_t p7ioc_set_sm_timeout(struct p7ioc_phb *p, uint64_t dur)
+{
+ uint64_t target, now = mftb();
+
+ target = now + dur;
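+	/* A target of 0 presumably means "no timeout pending", so skip
+	 * it if the addition happens to wrap to 0.
+	 */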
+ if (target == 0)
+ target++;
+ p->delay_tgt_tb = target;
+
+ return dur;
+}
+
+/*
+ * Lock callbacks. Allows the OPAL API handlers to lock the
+ * PHB around calls such as config space, EEH, etc...
+ */
+static void p7ioc_phb_lock(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ lock(&p->lock);
+}
+
+static void p7ioc_phb_unlock(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ unlock(&p->lock);
+}
+
+static bool p7ioc_phb_fenced(struct p7ioc_phb *p)
+{
+ struct p7ioc *ioc = p->ioc;
+ uint64_t fence, fbits;
+
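+	/* Each PHB appears to own a nibble of the fence shadow register,
+	 * from the top down; any bit set in it means the PHB is fenced.
+	 */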
+ fbits = 0x0003000000000000 >> (p->index * 4);
+ fence = in_be64(ioc->regs + P7IOC_CHIP_FENCE_SHADOW);
+
+ return (fence & fbits) != 0;
+}
+
+/*
+ * Configuration space access
+ *
+ * The PHB lock is assumed to be already held
+ */
+static int64_t p7ioc_pcicfg_check(struct p7ioc_phb *p, uint32_t bdfn,
+ uint32_t offset, uint32_t size)
+{
+ uint32_t sm = size - 1;
+
+ if (offset > 0xfff || bdfn > 0xffff)
+ return OPAL_PARAMETER;
+ if (offset & sm)
+ return OPAL_PARAMETER;
+
+ /* The root bus only has a device at 0 and we get into an
+ * error state if we try to probe beyond that, so let's
+ * avoid that and just return an error to Linux
+ */
+ if ((bdfn >> 8) == 0 && (bdfn & 0xff))
+ return OPAL_HARDWARE;
+
+ /* Check PHB state */
+ if (p->state == P7IOC_PHB_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ return OPAL_SUCCESS;
+}
+
+#define P7IOC_PCI_CFG_READ(size, type) \
+static int64_t p7ioc_pcicfg_read##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type *data) \
+{ \
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb); \
+ uint64_t addr; \
+ void *base = p->regs; \
+ int64_t rc; \
+ \
+ /* Initialize data in case of error */ \
+ *data = (type)0xffffffff; \
+ \
+ rc = p7ioc_pcicfg_check(p, bdfn, offset, sizeof(type)); \
+ if (rc) \
+ return rc; \
+ \
+ if (p7ioc_phb_fenced(p)) { \
+ if (!(p->flags & P7IOC_PHB_CFG_USE_ASB)) \
+ return OPAL_HARDWARE; \
+ \
+ base = p->regs_asb; \
+ } else if ((p->flags & P7IOC_PHB_CFG_BLOCKED) && bdfn != 0) { \
+ return OPAL_HARDWARE; \
+ } \
+ \
+ addr = PHB_CA_ENABLE | ((uint64_t)bdfn << PHB_CA_FUNC_LSH); \
+ addr = SETFIELD(PHB_CA_REG, addr, offset); \
+ out_be64(base + PHB_CONFIG_ADDRESS, addr); \
+ *data = in_le##size(base + PHB_CONFIG_DATA + \
+ (offset & (4 - sizeof(type)))); \
+ \
+ return OPAL_SUCCESS; \
+}
+
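+/* Both accessors use the indirect CONFIG_ADDRESS/CONFIG_DATA pattern:
+ * the BDFN and register offset are latched into PHB_CONFIG_ADDRESS,
+ * then the little-endian data port is accessed at the sub-word offset.
+ */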
+#define P7IOC_PCI_CFG_WRITE(size, type) \
+static int64_t p7ioc_pcicfg_write##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type data) \
+{ \
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb); \
+ void *base = p->regs; \
+ uint64_t addr; \
+ int64_t rc; \
+ \
+ rc = p7ioc_pcicfg_check(p, bdfn, offset, sizeof(type)); \
+ if (rc) \
+ return rc; \
+ \
+ if (p7ioc_phb_fenced(p)) { \
+ if (!(p->flags & P7IOC_PHB_CFG_USE_ASB)) \
+ return OPAL_HARDWARE; \
+ \
+ base = p->regs_asb; \
+ } else if ((p->flags & P7IOC_PHB_CFG_BLOCKED) && bdfn != 0) { \
+ return OPAL_HARDWARE; \
+ } \
+ \
+ addr = PHB_CA_ENABLE | ((uint64_t)bdfn << PHB_CA_FUNC_LSH); \
+ addr = SETFIELD(PHB_CA_REG, addr, offset); \
+ out_be64(base + PHB_CONFIG_ADDRESS, addr); \
+ out_le##size(base + PHB_CONFIG_DATA + \
+ (offset & (4 - sizeof(type))), data); \
+ \
+ return OPAL_SUCCESS; \
+}
+
+P7IOC_PCI_CFG_READ(8, uint8_t)
+P7IOC_PCI_CFG_READ(16, uint16_t)
+P7IOC_PCI_CFG_READ(32, uint32_t)
+P7IOC_PCI_CFG_WRITE(8, uint8_t)
+P7IOC_PCI_CFG_WRITE(16, uint16_t)
+P7IOC_PCI_CFG_WRITE(32, uint32_t)
+
+static int64_t p7ioc_presence_detect(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+
+ /* XXX Test for PHB in error state ? */
+
+ if (reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)
+ return OPAL_SHPC_DEV_PRESENT;
+
+ return OPAL_SHPC_DEV_NOT_PRESENT;
+}
+
+static int64_t p7ioc_link_state(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ uint16_t lstat;
+ int64_t rc;
+
+ /* XXX Test for PHB in error state ? */
+
+	/* If the link isn't trained, report it as down */
+ if (!(reg & PHB_PCIE_DLP_TC_DL_LINKACT))
+ return OPAL_SHPC_LINK_DOWN;
+
+ rc = p7ioc_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_LSTAT,
+ &lstat);
+ if (rc < 0) {
+ /* Shouldn't happen */
+ PHBERR(p, "Failed to read link status\n");
+ return OPAL_HARDWARE;
+ }
+ if (!(lstat & PCICAP_EXP_LSTAT_DLLL_ACT))
+ return OPAL_SHPC_LINK_DOWN;
+
+ return GETFIELD(PCICAP_EXP_LSTAT_WIDTH, lstat);
+}
+
+static int64_t p7ioc_sm_freset(struct p7ioc_phb *p)
+{
+ uint64_t reg;
+ uint32_t cfg32;
+ uint64_t ci_idx = p->index + 2;
+
+ switch(p->state) {
+ case P7IOC_PHB_STATE_FUNCTIONAL:
+ /* If the slot isn't present, we needn't do it */
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
+ PHBDBG(p, "Slot freset: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Mask PCIE port interrupts and AER receiver error */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000);
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
+ cfg32 |= PCIECAP_AER_CE_RECVR_ERR;
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, cfg32);
+
+ /* Mask CI port error and clear it */
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_ERR_MASK(ci_idx),
+ 0xa4f4000000000000ul);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK,
+ 0xadb650c9808dd051ul);
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx),
+ 0x0ul);
+
+ /* Disable link to avoid training issues */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ reg |= PHB_PCIE_DLP_TCTX_DISABLE;
+ out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg);
+ PHBDBG(p, "Slot freset: disable link training\n");
+
+ p->state = P7IOC_PHB_STATE_FRESET_DISABLE_LINK;
+ p->retries = 12;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_FRESET_DISABLE_LINK:
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & PHB_PCIE_DLP_TCRX_DISABLED) {
+ /* Turn on freset */
+ reg = in_be64(p->regs + PHB_RESET);
+ reg &= ~0x2000000000000000ul;
+ out_be64(p->regs + PHB_RESET, reg);
+ PHBDBG(p, "Slot freset: assert\n");
+
+ p->state = P7IOC_PHB_STATE_FRESET_ASSERT_DELAY;
+ return p7ioc_set_sm_timeout(p, secs_to_tb(1));
+ }
+
+ if (p->retries-- == 0) {
+ PHBDBG(p, "Slot freset: timeout to disable link training\n");
+ goto error;
+ }
+
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_FRESET_ASSERT_DELAY:
+ /* Turn off freset */
+ reg = in_be64(p->regs + PHB_RESET);
+ reg |= 0x2000000000000000ul;
+ out_be64(p->regs + PHB_RESET, reg);
+ PHBDBG(p, "Slot freset: deassert\n");
+
+ p->state = P7IOC_PHB_STATE_FRESET_DEASSERT_DELAY;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(200));
+ case P7IOC_PHB_STATE_FRESET_DEASSERT_DELAY:
+ /* Restore link control */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ reg &= ~PHB_PCIE_DLP_TCTX_DISABLE;
+ out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg);
+ PHBDBG(p, "Slot freset: enable link training\n");
+
+ p->state = P7IOC_PHB_STATE_FRESET_WAIT_LINK;
+ p->retries = 100;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_FRESET_WAIT_LINK:
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & PHB_PCIE_DLP_TC_DL_LINKACT) {
+ /*
+ * Clear spurious errors and enable PCIE port
+ * interrupts
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS,
+ 0x00E0000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,
+ 0xFE65000000000000);
+
+ /* Clear AER receiver error status */
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_STATUS,
+ PCIECAP_AER_CE_RECVR_ERR);
+ /* Unmask receiver error status in AER */
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
+ cfg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, cfg32);
+ /* Clear and Unmask CI port and PHB errors */
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx),
+ 0x0ul);
+ out_be64(p->regs + PHB_LEM_FIR_ACCUM,
+ 0x0ul);
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_ERR_MASK_AND(ci_idx),
+ 0x0ul);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK,
+ 0x1249a1147f500f2cul);
+ PHBDBG(p, "Slot freset: link up!\n");
+
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ p->flags &= ~P7IOC_PHB_CFG_BLOCKED;
+ return OPAL_SUCCESS;
+ }
+
+ if (p->retries-- == 0) {
+ uint16_t val;
+
+ if (p->gen == 1) {
+ PHBDBG(p, "Slot freset: timeout for link up in Gen1 mode!\n");
+ goto error;
+ }
+
+ PHBDBG(p, "Slot freset: timeout for link up.\n");
+ PHBDBG(p, "Slot freset: fallback to Gen1.\n");
+			p->gen--;
+
+ /* Limit speed to 2.5G */
+ p7ioc_pcicfg_read16(&p->phb, 0,
+ p->ecap + PCICAP_EXP_LCTL2, &val);
+ val = SETFIELD(PCICAP_EXP_LCTL2_TLSPD, val, 1);
+ p7ioc_pcicfg_write16(&p->phb, 0,
+ p->ecap + PCICAP_EXP_LCTL2,
+ val);
+
+ /* Retrain */
+ p7ioc_pcicfg_read16(&p->phb, 0,
+ p->ecap + PCICAP_EXP_LCTL, &val);
+ p7ioc_pcicfg_write16(&p->phb, 0,
+ p->ecap + PCICAP_EXP_LCTL,
+ val | PCICAP_EXP_LCTL_LINK_RETRAIN);
+
+ /* Enter FRESET_WAIT_LINK, again */
+ p->state = P7IOC_PHB_STATE_FRESET_WAIT_LINK;
+ p->retries = 100;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ }
+
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ default:
+ break;
+ }
+
+error:
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t p7ioc_freset(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_HARDWARE;
+
+ p->flags |= P7IOC_PHB_CFG_BLOCKED;
+ return p7ioc_sm_freset(p);
+}
+
+static int64_t p7ioc_power_state(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+
+ /* XXX Test for PHB in error state ? */
+
+ if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
+ return OPAL_SHPC_POWER_ON;
+
+ return OPAL_SHPC_POWER_OFF;
+}
+
+static int64_t p7ioc_sm_slot_power_off(struct p7ioc_phb *p)
+{
+ uint64_t reg;
+
+ switch(p->state) {
+ case P7IOC_PHB_STATE_FUNCTIONAL:
+		/*
+		 * Check the presence and power status. If the slot is
+		 * empty or the power is already off, we stop here.
+		 */
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
+ PHBDBG(p, "Slot power off: no device\n");
+ return OPAL_CLOSED;
+ }
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
+ PHBDBG(p, "Slot power off: already off\n");
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_SUCCESS;
+ }
+
+		/*
+		 * Mask PCIE port interrupts and turn power off
+		 *
+		 * As for power-on, bit 0 of the PHB hotplug override
+		 * register must be set and then explicitly cleared when
+		 * powering off the PHB slot; otherwise the request
+		 * won't take effect.
+		 */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
+ reg = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
+ reg &= ~(0x8c00000000000000ul);
+ reg |= 0x8400000000000000ul;
+ out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
+ reg &= ~(0x8c00000000000000ul);
+ reg |= 0x0c00000000000000ul;
+ out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
+ PHBDBG(p, "Slot power off: powering off...\n");
+
+ p->state = P7IOC_PHB_STATE_SPDOWN_STABILIZE_DELAY;
+ return p7ioc_set_sm_timeout(p, secs_to_tb(2));
+	case P7IOC_PHB_STATE_SPDOWN_STABILIZE_DELAY:
+		/*
+		 * The link should have stabilized after the 2 second
+		 * delay. We still poll the slot status every 1ms, up
+		 * to 1000 times, to make sure the power is really down.
+		 */
+		p->retries = 1000;
+		p->state = P7IOC_PHB_STATE_SPDOWN_SLOT_STATUS;
+		PHBDBG(p, "Slot power off: waiting for power off\n");
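+		/* Fall through */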
+ case P7IOC_PHB_STATE_SPDOWN_SLOT_STATUS:
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
+ /*
+ * We completed the task. Clear link errors
+ * and restore PCIE port interrupts.
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS,
+ 0x00E0000000000000ul);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN,
+ 0xFE65000000000000ul);
+
+ PHBDBG(p, "Slot power off: power off completely\n");
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_SUCCESS;
+ }
+
+ if (p->retries-- == 0) {
+ PHBERR(p, "Timeout powering off\n");
+ goto error;
+ }
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(1));
+ default:
+ break;
+ }
+
+error:
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t p7ioc_slot_power_off(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p7ioc_sm_slot_power_off(p);
+}
+
+static int64_t p7ioc_sm_slot_power_on(struct p7ioc_phb *p)
+{
+ uint64_t reg;
+ uint32_t reg32;
+ uint64_t ci_idx = p->index + 2;
+
+ switch(p->state) {
+ case P7IOC_PHB_STATE_FUNCTIONAL:
+ /* Check presence */
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
+ PHBDBG(p, "Slot power on: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Adjust UTL interrupt settings to disable various
+ * errors that would interfere with the process
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e00000000000000);
+
+ /* If the power is not on, turn it on now */
+ if (!(reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)) {
+ /*
+ * The hotplug override register will not properly
+ * initiate the poweron sequence unless bit 0
+ * transitions from 0 to 1. Since it can already be
+ * set to 1 as a result of a previous power-on
+ * operation (even if the slot power is now off)
+ * we need to first clear it, then set it to 1 or
+ * nothing will happen
+ */
+ reg = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
+ reg &= ~(0x8c00000000000000ul);
+ out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
+ reg |= 0x8400000000000000ul;
+ out_be64(p->regs + PHB_HOTPLUG_OVERRIDE, reg);
+ p->state = P7IOC_PHB_STATE_SPUP_STABILIZE_DELAY;
+ PHBDBG(p, "Slot power on: powering on...\n");
+ return p7ioc_set_sm_timeout(p, secs_to_tb(2));
+ }
+ /* Power is already on */
+ power_ok:
+ /* Mask AER receiver error */
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &reg32);
+ reg32 |= PCIECAP_AER_CE_RECVR_ERR;
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, reg32);
+
+ /* Mask CI port error and clear it */
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_ERR_MASK(ci_idx),
+ 0xa4f4000000000000ul);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK,
+ 0xadb650c9808dd051ul);
+ out_be64(p->ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx),
+ 0x0ul);
+
+ /* Disable link to avoid training issues */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ reg |= PHB_PCIE_DLP_TCTX_DISABLE;
+ out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg);
+ PHBDBG(p, "Slot power on: disable link training\n");
+
+ /* Switch to state machine of fundamental reset */
+ p->state = P7IOC_PHB_STATE_FRESET_DISABLE_LINK;
+ p->retries = 12;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_SPUP_STABILIZE_DELAY:
+ /* Come here after the 2s delay after power up */
+ p->retries = 1000;
+ p->state = P7IOC_PHB_STATE_SPUP_SLOT_STATUS;
+ PHBDBG(p, "Slot power on: waiting for power\n");
+ /* Fall through */
+ case P7IOC_PHB_STATE_SPUP_SLOT_STATUS:
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+
+		/* The doc says to check the LED status, but we ignore
+		 * that: there's no point really and it's easier that way
+		 */
+ if (reg & PHB_PCIE_SLOTCTL2_PWR_EN_STAT)
+ goto power_ok;
+ if (p->retries-- == 0) {
+ /* XXX Improve error logging */
+ PHBERR(p, "Timeout powering up slot\n");
+ goto error;
+ }
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ default:
+ break;
+ }
+
+ /* Unknown state, hardware error ? */
+ error:
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t p7ioc_slot_power_on(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* run state machine */
+ return p7ioc_sm_slot_power_on(p);
+}
+
+/*
+ * The OS is expected to do a fundamental reset after a complete
+ * reset so that the PHB can recover from the fenced state. It
+ * needn't do that explicitly, though, since a fundamental reset
+ * is performed automatically when the PHB slot is powered back
+ * on.
+ */
+static int64_t p7ioc_complete_reset(struct phb *phb, uint8_t assert)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ struct p7ioc *ioc = p->ioc;
+ uint64_t val64;
+
+ if (assert == OPAL_ASSERT_RESET) {
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL &&
+ p->state != P7IOC_PHB_STATE_FENCED)
+ return OPAL_HARDWARE;
+
+ p->flags |= P7IOC_PHB_CFG_BLOCKED;
+ p7ioc_phb_reset(phb);
+
+		/*
+		 * Experiments show that we probably still have the
+		 * fenced state for this PHB latched in the Fence WOF,
+		 * and we need to clear it explicitly. Besides, the RGC
+		 * might already have an informational error latched,
+		 * which we should clear explicitly as well; otherwise
+		 * RGC XIVE#0 won't issue any more interrupts.
+		 */
+ val64 = in_be64(ioc->regs + P7IOC_CHIP_FENCE_WOF);
+ val64 &= ~PPC_BIT(15 + p->index * 4);
+ out_be64(ioc->regs + P7IOC_CHIP_FENCE_WOF, val64);
+
+ /* Clear informational error from RGC */
+ val64 = in_be64(ioc->regs + P7IOC_RGC_LEM_BASE + P7IOC_LEM_WOF_OFFSET);
+ val64 &= ~PPC_BIT(18);
+ out_be64(ioc->regs + P7IOC_RGC_LEM_BASE + P7IOC_LEM_WOF_OFFSET, val64);
+ val64 = in_be64(ioc->regs + P7IOC_RGC_LEM_BASE + P7IOC_LEM_FIR_OFFSET);
+ val64 &= ~PPC_BIT(18);
+ out_be64(ioc->regs + P7IOC_RGC_LEM_BASE + P7IOC_LEM_FIR_OFFSET, val64);
+
+ return p7ioc_sm_slot_power_off(p);
+ } else {
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_HARDWARE;
+
+ return p7ioc_sm_slot_power_on(p);
+ }
+
+	/* We should never get here */
+ return OPAL_PARAMETER;
+}
+
+/*
+ * We have to mask errors before disabling link training;
+ * otherwise we end up with an endless stream of frozen PEs.
+ * We also need some delay after re-enabling link training.
+ * Both conclusions come from experiment; neither is mentioned
+ * in any document.
+ */
+static int64_t p7ioc_sm_hot_reset(struct p7ioc_phb *p)
+{
+ uint64_t reg;
+ uint32_t cfg32;
+ uint16_t brctl;
+
+ switch(p->state) {
+ case P7IOC_PHB_STATE_FUNCTIONAL:
+ /* If the slot isn't present, we needn't do it */
+ reg = in_be64(p->regs + PHB_PCIE_SLOTCTL2);
+ if (!(reg & PHB_PCIE_SLOTCTL2_PRSTN_STAT)) {
+ PHBDBG(p, "Slot hot reset: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Mask PCIE port interrupts and AER receiver error */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7E00000000000000);
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
+ cfg32 |= PCIECAP_AER_CE_RECVR_ERR;
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, cfg32);
+
+ /* Disable link to avoid training issues */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ reg |= PHB_PCIE_DLP_TCTX_DISABLE;
+ out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg);
+ PHBDBG(p, "Slot hot reset: disable link training\n");
+
+ p->state = P7IOC_PHB_STATE_HRESET_DISABLE_LINK;
+ p->retries = 12;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_HRESET_DISABLE_LINK:
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & PHB_PCIE_DLP_TCRX_DISABLED) {
+ /* Turn on host reset */
+ p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl |= PCI_CFG_BRCTL_SECONDARY_RESET;
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ PHBDBG(p, "Slot hot reset: assert reset\n");
+
+ p->state = P7IOC_PHB_STATE_HRESET_DELAY;
+ return p7ioc_set_sm_timeout(p, secs_to_tb(1));
+ }
+
+ if (p->retries-- == 0) {
+ PHBDBG(p, "Slot hot reset: timeout to disable link training\n");
+ return OPAL_HARDWARE;
+ }
+
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_HRESET_DELAY:
+ /* Turn off host reset */
+ p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ PHBDBG(p, "Slot hot reset: deassert reset\n");
+
+ p->state = P7IOC_PHB_STATE_HRESET_ENABLE_LINK;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(200));
+ case P7IOC_PHB_STATE_HRESET_ENABLE_LINK:
+ /* Restore link control */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ reg &= ~PHB_PCIE_DLP_TCTX_DISABLE;
+ out_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL, reg);
+ PHBDBG(p, "Slot hot reset: enable link training\n");
+
+ p->state = P7IOC_PHB_STATE_HRESET_WAIT_LINK;
+ p->retries = 100;
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ case P7IOC_PHB_STATE_HRESET_WAIT_LINK:
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & PHB_PCIE_DLP_TC_DL_LINKACT) {
+ /*
+ * Clear spurious errors and enable PCIE port
+ * interrupts
+ */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0x00E0000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xFE65000000000000);
+
+ /* Clear AER receiver error status */
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_STATUS,
+ PCIECAP_AER_CE_RECVR_ERR);
+ /* Unmask receiver error status in AER */
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, &cfg32);
+ cfg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
+ p7ioc_pcicfg_write32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_CE_MASK, cfg32);
+ PHBDBG(p, "Slot hot reset: link up!\n");
+
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ p->flags &= ~P7IOC_PHB_CFG_BLOCKED;
+ return OPAL_SUCCESS;
+ }
+
+ if (p->retries-- == 0) {
+ PHBDBG(p, "Slot hot reset: timeout for link up\n");
+ goto error;
+ }
+
+ return p7ioc_set_sm_timeout(p, msecs_to_tb(10));
+ default:
+ break;
+ }
+
+ /* Unknown state, hardware error ? */
+error:
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t p7ioc_hot_reset(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ if (p->state != P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_HARDWARE;
+
+ p->flags |= P7IOC_PHB_CFG_BLOCKED;
+ return p7ioc_sm_hot_reset(p);
+}
+
+static int64_t p7ioc_poll(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t now = mftb();
+
+ if (p->state == P7IOC_PHB_STATE_FUNCTIONAL)
+ return OPAL_SUCCESS;
+
+ /* Check timer */
+ if (p->delay_tgt_tb &&
+ tb_compare(now, p->delay_tgt_tb) == TB_ABEFOREB)
+ return p->delay_tgt_tb - now;
+
+ /* Expired (or not armed), clear it */
+ p->delay_tgt_tb = 0;
+
+ /* Dispatch to the right state machine */
+ switch(p->state) {
+ case P7IOC_PHB_STATE_SPUP_STABILIZE_DELAY:
+ case P7IOC_PHB_STATE_SPUP_SLOT_STATUS:
+ return p7ioc_sm_slot_power_on(p);
+ case P7IOC_PHB_STATE_SPDOWN_STABILIZE_DELAY:
+ case P7IOC_PHB_STATE_SPDOWN_SLOT_STATUS:
+ return p7ioc_sm_slot_power_off(p);
+ case P7IOC_PHB_STATE_FRESET_DISABLE_LINK:
+ case P7IOC_PHB_STATE_FRESET_ASSERT_DELAY:
+ case P7IOC_PHB_STATE_FRESET_DEASSERT_DELAY:
+ case P7IOC_PHB_STATE_FRESET_WAIT_LINK:
+ return p7ioc_sm_freset(p);
+ case P7IOC_PHB_STATE_HRESET_DISABLE_LINK:
+ case P7IOC_PHB_STATE_HRESET_ASSERT:
+ case P7IOC_PHB_STATE_HRESET_DELAY:
+ case P7IOC_PHB_STATE_HRESET_ENABLE_LINK:
+ case P7IOC_PHB_STATE_HRESET_WAIT_LINK:
+ return p7ioc_sm_hot_reset(p);
+ default:
+ break;
+ }
+
+ /* Unknown state, could be a HW error */
+ return OPAL_HARDWARE;
+}
+
+static void p7ioc_eeh_read_phb_status(struct p7ioc_phb *p,
+ struct OpalIoP7IOCPhbErrorData *stat)
+{
+ bool locked;
+ uint16_t tmp16;
+ unsigned int i;
+
+ memset(stat, 0, sizeof(struct OpalIoP7IOCPhbErrorData));
+
+ /* Error data common part */
+ stat->common.version = OPAL_PHB_ERROR_DATA_VERSION_1;
+ stat->common.ioType = OPAL_PHB_ERROR_DATA_TYPE_P7IOC;
+ stat->common.len = sizeof(struct OpalIoP7IOCPhbErrorData);
+
+ /*
+ * We read some registers using config space through AIB.
+ *
+	 * Other registers are read through ASB when possible, so that
+	 * we can still get to them through a fence if one is present.
+ *
+ * Note that the OpalIoP7IOCPhbErrorData has oddities, such as the
+ * bridge control being 32-bit and the UTL registers being 32-bit
+ * (which they really are, but they use the top 32-bit of a 64-bit
+ * register so we need to be a bit careful).
+ */
+
+ /* Use ASB to access PCICFG if the PHB has been fenced */
+ locked = lock_recursive(&p->lock);
+ p->flags |= P7IOC_PHB_CFG_USE_ASB;
+
+ /* Grab RC bridge control, make it 32-bit */
+ p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &tmp16);
+ stat->brdgCtl = tmp16;
+
+ /* Grab UTL status registers */
+ stat->portStatusReg = hi32(in_be64(p->regs_asb
+ + UTL_PCIE_PORT_STATUS));
+ stat->rootCmplxStatus = hi32(in_be64(p->regs_asb
+ + UTL_RC_STATUS));
+ stat->busAgentStatus = hi32(in_be64(p->regs_asb
+ + UTL_SYS_BUS_AGENT_STATUS));
+
+ /*
+ * Grab various RC PCIe capability registers. All device, slot
+ * and link status are 16-bit, so we grab the pair control+status
+ * for each of them
+ */
+ p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_DEVCTL,
+ &stat->deviceStatus);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTCTL,
+ &stat->slotStatus);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_LCTL,
+ &stat->linkStatus);
+
+ /*
+	 * I assume those are the standard config space header: cmd &
+	 * status together make up 32 bits. Secondary status is 16-bit,
+	 * so I'll clear the top half of that one
+ */
+ p7ioc_pcicfg_read32(&p->phb, 0, PCI_CFG_CMD, &stat->devCmdStatus);
+ p7ioc_pcicfg_read16(&p->phb, 0, PCI_CFG_SECONDARY_STATUS, &tmp16);
+ stat->devSecStatus = tmp16;
+
+ /* Grab a bunch of AER regs */
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_RERR_STA,
+ &stat->rootErrorStatus);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_UE_STATUS,
+ &stat->uncorrErrorStatus);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_STATUS,
+ &stat->corrErrorStatus);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG0,
+ &stat->tlpHdr1);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG1,
+ &stat->tlpHdr2);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG2,
+ &stat->tlpHdr3);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG3,
+ &stat->tlpHdr4);
+ p7ioc_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_SRCID,
+ &stat->sourceId);
+
+ /* Restore to AIB */
+ p->flags &= ~P7IOC_PHB_CFG_USE_ASB;
+ if (locked) {
+ unlock(&p->lock);
+ pci_put_phb(&p->phb);
+ }
+
+ /*
+	 * No idea what that is supposed to be, opal.h says
+ * "Record data about the call to allocate a buffer."
+ *
+ * Let's leave them alone for now...
+ *
+ * uint64_t errorClass;
+ * uint64_t correlator;
+ */
+
+ /* P7IOC MMIO Error Regs */
+ stat->p7iocPlssr = in_be64(p->regs_asb + PHB_CPU_LOADSTORE_STATUS);
+ stat->p7iocCsr = in_be64(p->regs_asb + PHB_DMA_CHAN_STATUS);
+ stat->lemFir = in_be64(p->regs_asb + PHB_LEM_FIR_ACCUM);
+ stat->lemErrorMask = in_be64(p->regs_asb + PHB_LEM_ERROR_MASK);
+ stat->lemWOF = in_be64(p->regs_asb + PHB_LEM_WOF);
+ stat->phbErrorStatus = in_be64(p->regs_asb + PHB_ERR_STATUS);
+ stat->phbFirstErrorStatus = in_be64(p->regs_asb + PHB_ERR1_STATUS);
+ stat->phbErrorLog0 = in_be64(p->regs_asb + PHB_ERR_LOG_0);
+ stat->phbErrorLog1 = in_be64(p->regs_asb + PHB_ERR_LOG_1);
+ stat->mmioErrorStatus = in_be64(p->regs_asb + PHB_OUT_ERR_STATUS);
+ stat->mmioFirstErrorStatus = in_be64(p->regs_asb + PHB_OUT_ERR1_STATUS);
+ stat->mmioErrorLog0 = in_be64(p->regs_asb + PHB_OUT_ERR_LOG_0);
+ stat->mmioErrorLog1 = in_be64(p->regs_asb + PHB_OUT_ERR_LOG_1);
+ stat->dma0ErrorStatus = in_be64(p->regs_asb + PHB_INA_ERR_STATUS);
+ stat->dma0FirstErrorStatus = in_be64(p->regs_asb + PHB_INA_ERR1_STATUS);
+ stat->dma0ErrorLog0 = in_be64(p->regs_asb + PHB_INA_ERR_LOG_0);
+ stat->dma0ErrorLog1 = in_be64(p->regs_asb + PHB_INA_ERR_LOG_1);
+ stat->dma1ErrorStatus = in_be64(p->regs_asb + PHB_INB_ERR_STATUS);
+ stat->dma1FirstErrorStatus = in_be64(p->regs_asb + PHB_INB_ERR1_STATUS);
+ stat->dma1ErrorLog0 = in_be64(p->regs_asb + PHB_INB_ERR_LOG_0);
+ stat->dma1ErrorLog1 = in_be64(p->regs_asb + PHB_INB_ERR_LOG_1);
+
+ /* Grab PESTA & B content */
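+	/* The IODA table pointer auto-increments, so consecutive
+	 * reads return consecutive PE entries
+	 */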
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, 0, true);
+ for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++)
+ stat->pestA[i] = in_be64(p->regs_asb + PHB_IODA_DATA0);
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, 0, true);
+ for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++)
+ stat->pestB[i] = in_be64(p->regs_asb + PHB_IODA_DATA0);
+}
+
+static int64_t p7ioc_eeh_freeze_status(struct phb *phb, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint16_t *severity,
+ uint64_t *phb_status)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t peev_bit = PPC_BIT(pe_number & 0x3f);
+ uint64_t peev, pesta, pestb;
+
+ /* Defaults: not frozen */
+ *freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+
+ /* Check dead */
+ if (p->state == P7IOC_PHB_STATE_BROKEN) {
+ *freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ if (severity)
+ *severity = OPAL_EEH_SEV_PHB_DEAD;
+ goto bail;
+ }
+
+ /* Check fence */
+ if (p7ioc_phb_fenced(p)) {
+ /* Should be OPAL_EEH_STOPPED_TEMP_UNAVAIL ? */
+ *freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ if (severity)
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ p->state = P7IOC_PHB_STATE_FENCED;
+ goto bail;
+ }
+
+ /* Check the PEEV */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
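+	/* Auto-increment is on: the first read returns PEEV word 0
+	 * (PEs 0..63), a second read returns word 1 (PEs 64..127)
+	 */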
+ peev = in_be64(p->regs + PHB_IODA_DATA0);
+ if (pe_number > 63)
+ peev = in_be64(p->regs + PHB_IODA_DATA0);
+ if (!(peev & peev_bit))
+ return OPAL_SUCCESS;
+
+ /* Indicate that we have an ER pending */
+ p7ioc_phb_set_err_pending(p, true);
+ if (severity)
+ *severity = OPAL_EEH_SEV_PE_ER;
+
+ /* Read the PESTA & PESTB */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, pe_number, false);
+ pesta = in_be64(p->regs + PHB_IODA_DATA0);
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, pe_number, false);
+ pestb = in_be64(p->regs + PHB_IODA_DATA0);
+
+ /* Convert them */
+ if (pesta & IODA_PESTA_MMIO_FROZEN)
+ *freeze_state |= OPAL_EEH_STOPPED_MMIO_FREEZE;
+ if (pestb & IODA_PESTB_DMA_STOPPED)
+ *freeze_state |= OPAL_EEH_STOPPED_DMA_FREEZE;
+
+ /* XXX Handle more causes */
+ if (pesta & IODA_PESTA_MMIO_CAUSE)
+ *pci_error_type = OPAL_EEH_PE_MMIO_ERROR;
+ else
+ *pci_error_type = OPAL_EEH_PE_DMA_ERROR;
+
+ bail:
+ if (phb_status)
+ p7ioc_eeh_read_phb_status(p, (struct OpalIoP7IOCPhbErrorData *)
+ phb_status);
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_eeh_next_error(struct phb *phb, uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type, uint16_t *severity)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ struct p7ioc *ioc = p->ioc;
+ uint64_t fir, peev0, peev1;
+ uint32_t cfg32, i;
+
+	/* Check if there are pending errors on the IOC. */
+ if (p7ioc_err_pending(ioc) &&
+ p7ioc_check_LEM(ioc, pci_error_type, severity))
+ return OPAL_SUCCESS;
+
+ /* Clear result */
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ *first_frozen_pe = (uint64_t)-1;
+
+ /* Check dead */
+ if (p->state == P7IOC_PHB_STATE_BROKEN) {
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_DEAD;
+ return OPAL_SUCCESS;
+ }
+
+ /* Check fence */
+ if (p7ioc_phb_fenced(p)) {
+ /* Should be OPAL_EEH_STOPPED_TEMP_UNAVAIL ? */
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ p->state = P7IOC_PHB_STATE_FENCED;
+ p7ioc_phb_set_err_pending(p, false);
+ return OPAL_SUCCESS;
+ }
+
+	/*
+	 * If we don't have a pending error (one might have been moved
+	 * from the IOC to the PHB), check whether there are any
+	 * frozen PEs.
+	 */
+ if (!p7ioc_phb_err_pending(p)) {
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
+ peev0 = in_be64(p->regs + PHB_IODA_DATA0);
+ peev1 = in_be64(p->regs + PHB_IODA_DATA0);
+ if (peev0 || peev1) {
+ p->err.err_src = P7IOC_ERR_SRC_PHB0 + p->index;
+ p->err.err_class = P7IOC_ERR_CLASS_ER;
+ p->err.err_bit = 0;
+ p7ioc_phb_set_err_pending(p, true);
+ }
+ }
+
+ /* Check the pending errors, which might come from IOC */
+ if (p7ioc_phb_err_pending(p)) {
+		/*
+		 * If the frozen PE was caused by a malfunctioning TLP,
+		 * we need to reset the PHB, so convert the ER to a
+		 * PHB-fatal error in that case.
+		 */
+ if (p->err.err_class == P7IOC_ERR_CLASS_ER) {
+ fir = in_be64(p->regs_asb + PHB_LEM_FIR_ACCUM);
+ if (fir & PPC_BIT(60)) {
+ p7ioc_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_UE_STATUS, &cfg32);
+ if (cfg32 & PCIECAP_AER_UE_MALFORMED_TLP)
+ p->err.err_class = P7IOC_ERR_CLASS_PHB;
+ }
+ }
+
+		/*
+		 * Map the P7IOC internal error class to one the OS can
+		 * handle. For P7IOC_ERR_CLASS_ER, we also need to
+		 * figure out the frozen PE.
+		 */
+ switch (p->err.err_class) {
+ case P7IOC_ERR_CLASS_PHB:
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ p7ioc_phb_set_err_pending(p, false);
+ break;
+ case P7IOC_ERR_CLASS_MAL:
+ case P7IOC_ERR_CLASS_INF:
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_INF;
+ p7ioc_phb_set_err_pending(p, false);
+ break;
+ case P7IOC_ERR_CLASS_ER:
+ *pci_error_type = OPAL_EEH_PE_ERROR;
+ *severity = OPAL_EEH_SEV_PE_ER;
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
+ peev0 = in_be64(p->regs + PHB_IODA_DATA0);
+ peev1 = in_be64(p->regs + PHB_IODA_DATA0);
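+			/* PPC_BIT(0) is the MSB, so bit i of peev0 maps
+			 * to PE i and bit i of peev1 to PE i + 64. Note
+			 * that peev1 is scanned first, so a frozen high
+			 * PE is reported before any frozen low one.
+			 */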
+
+ for (i = 0 ; i < 64; i++) {
+ if (PPC_BIT(i) & peev1) {
+ *first_frozen_pe = i + 64;
+ break;
+ }
+ }
+ for (i = 0 ;
+ *first_frozen_pe == (uint64_t)-1 && i < 64;
+ i++) {
+ if (PPC_BIT(i) & peev0) {
+ *first_frozen_pe = i;
+ break;
+ }
+ }
+
+ /* No frozen PE? */
+ if (*first_frozen_pe == (uint64_t)-1) {
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ p7ioc_phb_set_err_pending(p, false);
+ }
+
+ break;
+ default:
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ p7ioc_phb_set_err_pending(p, false);
+ }
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static void p7ioc_ER_err_clear(struct p7ioc_phb *p)
+{
+ u64 err, lem;
+ u32 val;
+
+ /* Rec 1,2 */
+ lem = in_be64(p->regs + PHB_LEM_FIR_ACCUM);
+
+ /* Rec 3,4,5 AER registers (could use cfg space accessors) */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000001c00000000ull);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0x10000000);
+
+ /* Rec 6,7,8 XXX DOC whacks payload & req size ... we don't */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000005000000000ull);
+ val = in_be32(p->regs + PHB_CONFIG_DATA);
+ out_be32(p->regs + PHB_CONFIG_DATA, (val & 0xe0700000) | 0x0f000f00);
+
+ /* Rec 9,10,11 */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000010400000000ull);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 12,13,14 */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000011000000000ull);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 23,24,25 */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000013000000000ull);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 26,27,28 */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000004000000000ull);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0x470100f8);
+
+ /* Rec 29..34 UTL registers */
+ err = in_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, err);
+ err = in_be64(p->regs + UTL_PCIE_PORT_STATUS);
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, err);
+ err = in_be64(p->regs + UTL_RC_STATUS);
+ out_be64(p->regs + UTL_RC_STATUS, err);
+
+ /* PHB error traps registers */
+ err = in_be64(p->regs + PHB_ERR_STATUS);
+ out_be64(p->regs + PHB_ERR_STATUS, err);
+ out_be64(p->regs + PHB_ERR1_STATUS, 0);
+ out_be64(p->regs + PHB_ERR_LOG_0, 0);
+ out_be64(p->regs + PHB_ERR_LOG_1, 0);
+
+ err = in_be64(p->regs + PHB_OUT_ERR_STATUS);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS, err);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0);
+
+ err = in_be64(p->regs + PHB_INA_ERR_STATUS);
+ out_be64(p->regs + PHB_INA_ERR_STATUS, err);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS, 0);
+ out_be64(p->regs + PHB_INA_ERR_LOG_0, 0);
+ out_be64(p->regs + PHB_INA_ERR_LOG_1, 0);
+
+ err = in_be64(p->regs + PHB_INB_ERR_STATUS);
+ out_be64(p->regs + PHB_INB_ERR_STATUS, err);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS, 0);
+ out_be64(p->regs + PHB_INB_ERR_LOG_0, 0);
+ out_be64(p->regs + PHB_INB_ERR_LOG_1, 0);
+
+ /* Rec 67, 68 LEM */
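+	/* Writing the complement of the bits latched earlier to the
+	 * AND-mask register clears exactly those bits in the FIR
+	 * accumulator without touching anything latched since
+	 */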
+ out_be64(p->regs + PHB_LEM_FIR_AND_MASK, ~lem);
+ out_be64(p->regs + PHB_LEM_WOF, 0);
+}
+
+static int64_t p7ioc_eeh_freeze_clear(struct phb *phb, uint64_t pe_number,
+ uint64_t eeh_action_token)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t peev0, peev1;
+
+ /* XXX Now this is a heavy hammer, coming roughly from the P7IOC doc
+ * and my old "pseudopal" code. It will need to be refined. In general
+ * error handling will have to be reviewed and probably done properly
+ * "from scratch" based on the description in the p7IOC spec.
+ *
+ * XXX Additionally, when handling interrupts, we might want to consider
+ * masking while processing and/or ack'ing interrupt bits etc...
+ */
+ u64 err;
+
+	/* Check the error summary. If nothing is set, move on to
+	 * clearing the PESTs, which can contain a freeze state from
+	 * a previous error or one simply set explicitly by the user
+	 */
+ err = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
+ if (err == 0)
+ goto clear_pest;
+
+ p7ioc_ER_err_clear(p);
+
+ clear_pest:
+	/* XXX We just clear the whole PESTA for MMIO clear and PESTB
+	 * for DMA clear. We might want to only clear the frozen bit
+	 * so as not to clobber the rest of the state. However, we
+	 * expect the state to have been harvested before the clear
+	 * operations, so this might not be an issue
+	 */
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO) {
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, pe_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ }
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_DMA) {
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, pe_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ }
+
+ /* Update ER pending indication */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
+ peev0 = in_be64(p->regs + PHB_IODA_DATA0);
+ peev1 = in_be64(p->regs + PHB_IODA_DATA0);
+ if (peev0 || peev1) {
+ p->err.err_src = P7IOC_ERR_SRC_PHB0 + p->index;
+ p->err.err_class = P7IOC_ERR_CLASS_ER;
+ p->err.err_bit = 0;
+ p7ioc_phb_set_err_pending(p, true);
+ } else
+ p7ioc_phb_set_err_pending(p, false);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_get_diag_data(struct phb *phb, void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ struct OpalIoP7IOCPhbErrorData *diag = diag_buffer;
+
+ if (diag_buffer_len < sizeof(struct OpalIoP7IOCPhbErrorData))
+ return OPAL_PARAMETER;
+
+ /* Specific error data */
+ p7ioc_eeh_read_phb_status(p, diag);
+
+	/*
+	 * We most likely got here because of errors (MAL or INF
+	 * class) from the IOC. In that case we need to clear the
+	 * pending errors and, for MAL class errors, mask the error
+	 * bit. Fortunately, we shouldn't get MAL class errors from
+	 * the IOC on P7IOC.
+	 */
+ if (p7ioc_phb_err_pending(p) &&
+ p->err.err_class == P7IOC_ERR_CLASS_INF &&
+ p->err.err_src >= P7IOC_ERR_SRC_PHB0 &&
+ p->err.err_src <= P7IOC_ERR_SRC_PHB5) {
+ p7ioc_ER_err_clear(p);
+ p7ioc_phb_set_err_pending(p, false);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * We don't support address remapping for now since all M64
+ * BARs share a single remapping base address. We might
+ * introduce a flag on the PHB to track that; the flag would
+ * only be allowed to change once. Something to do in the
+ * future.
+ */
+static int64_t p7ioc_set_phb_mem_window(struct phb *phb,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint64_t base,
+ uint64_t __unused pci_base,
+ uint64_t size)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t data64;
+
+ switch (window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ case OPAL_M32_WINDOW_TYPE:
+ return OPAL_UNSUPPORTED;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num >= 16)
+ return OPAL_PARAMETER;
+ /* The base and size should be 16MB aligned */
+ if (base & 0xFFFFFF || size & 0xFFFFFF)
+ return OPAL_PARAMETER;
+ data64 = p->m64b_cache[window_num];
+ data64 = SETFIELD(IODA_M64BT_BASE, data64, base >> 24);
+ size = (size >> 24);
+ data64 = SETFIELD(IODA_M64BT_MASK, data64, 0x1000000 - size);
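+		/* Base and mask are in 16MB units. The mask field holds
+		 * the two's complement of the size in those units: e.g.
+		 * a 256MB window gives size 16 and a mask field of
+		 * 0x1000000 - 16 = 0xFFFFF0
+		 */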
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+	/*
+	 * If the M64 BAR hasn't been enabled yet, we needn't flush
+	 * the setting to the hardware; just keep it in the cache
+	 */
+ p->m64b_cache[window_num] = data64;
+ if (!(data64 & IODA_M64BT_ENABLE))
+ return OPAL_SUCCESS;
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, window_num, false);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * We can't enable or disable I/O and M32 dynamically, nor is
+ * there any need to. So this function only supports M64 BARs.
+ */
+static int64_t p7ioc_phb_mmio_enable(struct phb *phb,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t enable)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t data64, base, mask;
+
+ switch (window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ case OPAL_M32_WINDOW_TYPE:
+ return OPAL_UNSUPPORTED;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num >= 16 ||
+ enable >= OPAL_ENABLE_M64_NON_SPLIT)
+ return OPAL_PARAMETER;
+
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+	/*
+	 * Before enabling a specific M64 BAR, its base/size must be
+	 * configured correctly. Otherwise it is likely to fence the
+	 * AIB.
+	 */
+ data64 = p->m64b_cache[window_num];
+ if (enable == OPAL_ENABLE_M64_SPLIT) {
+ base = GETFIELD(IODA_M64BT_BASE, data64);
+ base = (base << 24);
+ mask = GETFIELD(IODA_M64BT_MASK, data64);
+ if (base < p->m64_base || mask == 0x0ul)
+ return OPAL_PARTIAL;
+
+ data64 |= IODA_M64BT_ENABLE;
+ } else if (enable == OPAL_DISABLE_M64) {
+ data64 &= ~IODA_M64BT_ENABLE;
+ } else
+ return OPAL_PARAMETER;
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, window_num, false);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ p->m64b_cache[window_num] = data64;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_map_pe_mmio_window(struct phb *phb, uint16_t pe_number,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t segment_num)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t tbl, index;
+ uint64_t *cache;
+
+ if (pe_number > 127)
+ return OPAL_PARAMETER;
+
+ switch(window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ if (window_num != 0 || segment_num > 127)
+ return OPAL_PARAMETER;
+ tbl = IODA_TBL_IODT;
+ index = segment_num;
+ cache = &p->iod_cache[index];
+ break;
+ case OPAL_M32_WINDOW_TYPE:
+ if (window_num != 0 || segment_num > 127)
+ return OPAL_PARAMETER;
+ tbl = IODA_TBL_M32DT;
+ index = segment_num;
+ cache = &p->m32d_cache[index];
+ break;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num > 15 || segment_num > 7)
+ return OPAL_PARAMETER;
+
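+		/* The M64DT has 16 windows x 8 segments = 128 entries,
+		 * indexed by window then segment
+		 */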
+ tbl = IODA_TBL_M64DT;
+ index = window_num << 3 | segment_num;
+ cache = &p->m64d_cache[index];
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ p7ioc_phb_ioda_sel(p, tbl, index, false);
+ out_be64(p->regs + PHB_IODA_DATA0,
+ SETFIELD(IODA_XXDT_PE, 0ull, pe_number));
+
+ /* Update cache */
+ *cache = SETFIELD(IODA_XXDT_PE, 0ull, pe_number);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_set_pe(struct phb *phb, uint64_t pe_number,
+ uint64_t bdfn, uint8_t bus_compare,
+ uint8_t dev_compare, uint8_t func_compare,
+ uint8_t pe_action)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t pelt;
+ uint64_t *cache = &p->peltm_cache[pe_number];
+
+ if (pe_number > 127 || bdfn > 0xffff)
+ return OPAL_PARAMETER;
+ if (pe_action != OPAL_MAP_PE && pe_action != OPAL_UNMAP_PE)
+ return OPAL_PARAMETER;
+ if (bus_compare > 7)
+ return OPAL_PARAMETER;
+
+ if (pe_action == OPAL_MAP_PE) {
+ pelt = SETFIELD(IODA_PELTM_BUS, 0ul, bdfn >> 8);
+ pelt |= SETFIELD(IODA_PELTM_DEV, 0ul, (bdfn >> 3) & 0x1f);
+ pelt |= SETFIELD(IODA_PELTM_FUNC, 0ul, bdfn & 0x7);
+ pelt |= SETFIELD(IODA_PELTM_BUS_VALID, 0ul, bus_compare);
+ if (dev_compare)
+ pelt |= IODA_PELTM_DEV_VALID;
+ if (func_compare)
+ pelt |= IODA_PELTM_FUNC_VALID;
+ } else
+ pelt = 0;
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, pelt);
+
+ /* Update cache */
+ *cache = pelt;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_set_peltv(struct phb *phb, uint32_t parent_pe,
+ uint32_t child_pe, uint8_t state)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint32_t reg;
+ uint64_t mask, peltv;
+	uint64_t *cache;
+
+	if (parent_pe > 127 || child_pe > 127)
+ return OPAL_PARAMETER;
+
+ cache = (child_pe >> 6) ? &p->peltv_hi_cache[parent_pe] :
+ &p->peltv_lo_cache[parent_pe];
+ reg = (child_pe >> 6) ? PHB_IODA_DATA1 : PHB_IODA_DATA0;
+	/* Bit index of the child PE within the selected 64-bit word */
+	child_pe &= 0x3f;
+ mask = 1ull << (63 - child_pe);
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTV, parent_pe, false);
+ peltv = in_be64(p->regs + reg);
+ if (state)
+ peltv |= mask;
+ else
+ peltv &= ~mask;
+ out_be64(p->regs + reg, peltv);
+
+ /* Update cache */
+ *cache = peltv;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_map_pe_dma_window(struct phb *phb, uint16_t pe_number,
+ uint16_t window_id, uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t tvt0, tvt1, t, pelt;
+ uint64_t dma_window_size;
+ uint64_t *cache_lo, *cache_hi;
+
+ if (pe_number > 127 || window_id > 255 || tce_levels != 1)
+ return OPAL_PARAMETER;
+ cache_lo = &p->tve_lo_cache[window_id];
+ cache_hi = &p->tve_hi_cache[window_id];
+
+ /* Encode table size */
+ dma_window_size = tce_page_size * (tce_table_size >> 3);
+ t = ilog2(dma_window_size);
+ if (t < 27)
+ return OPAL_PARAMETER;
+ tvt0 = SETFIELD(IODA_TVT0_TCE_TABLE_SIZE, 0ul, (t - 26));
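+	/* tce_table_size is in bytes and each TCE is 8 bytes, so the
+	 * window spans (table_size / 8) * page_size bytes. The minimum
+	 * encodable window is 2^27 = 128MB (field value 1): e.g. a
+	 * 256KB table of 4K pages maps 32768 pages = 128MB
+	 */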
+
+ /* Encode TCE page size */
+ switch(tce_page_size) {
+ case 0x1000: /* 4K */
+ tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 1ul);
+ break;
+ case 0x10000: /* 64K */
+ tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 5ul);
+ break;
+ case 0x1000000: /* 16M */
+ tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 13ul);
+ break;
+ case 0x400000000: /* 16G */
+ tvt1 = SETFIELD(IODA_TVT1_IO_PSIZE, 0ul, 23ul);
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ /* XXX Hub number ... leave 0 for now */
+
+ /* Shift in the address. The table address is "off by 4 bits"
+	 * but since the field is itself shifted by 16, we basically
+	 * need to write the address >> 12, which boils down to
+	 * writing a 4k page address
+ */
+ tvt0 = SETFIELD(IODA_TVT0_TABLE_ADDR, tvt0, tce_table_addr >> 12);
+
+ /* Read the PE filter info from the PELT-M */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
+ pelt = in_be64(p->regs + PHB_IODA_DATA0);
+
+ /* Copy in filter bits from PELT */
+ tvt0 = SETFIELD(IODA_TVT0_BUS_VALID, tvt0,
+ GETFIELD(IODA_PELTM_BUS_VALID, pelt));
+ tvt0 = SETFIELD(IODA_TVT0_BUS_NUM, tvt0,
+ GETFIELD(IODA_PELTM_BUS, pelt));
+ tvt1 = SETFIELD(IODA_TVT1_DEV_NUM, tvt1,
+ GETFIELD(IODA_PELTM_DEV, pelt));
+ tvt1 = SETFIELD(IODA_TVT1_FUNC_NUM, tvt1,
+ GETFIELD(IODA_PELTM_FUNC, pelt));
+ if (pelt & IODA_PELTM_DEV_VALID)
+ tvt1 |= IODA_TVT1_DEV_VALID;
+ if (pelt & IODA_PELTM_FUNC_VALID)
+ tvt1 |= IODA_TVT1_FUNC_VALID;
+ tvt1 = SETFIELD(IODA_TVT1_PE_NUM, tvt1, pe_number);
+
+ /* Write the TVE */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_TVT, window_id, false);
+ out_be64(p->regs + PHB_IODA_DATA1, tvt1);
+ out_be64(p->regs + PHB_IODA_DATA0, tvt0);
+
+ /* Update cache */
+ *cache_lo = tvt0;
+ *cache_hi = tvt1;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_map_pe_dma_window_real(struct phb *phb __unused,
+ uint16_t pe_number __unused,
+ uint16_t dma_window_num __unused,
+ uint64_t pci_start_addr __unused,
+ uint64_t pci_mem_size __unused)
+{
+ /* XXX Not yet implemented (not yet used by Linux) */
+ return OPAL_UNSUPPORTED;
+}
+
+static int64_t p7ioc_set_mve(struct phb *phb, uint32_t mve_number,
+ uint32_t pe_number)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t pelt, mve = 0;
+ uint64_t *cache = &p->mve_cache[mve_number];
+
+ if (pe_number > 127 || mve_number > 255)
+ return OPAL_PARAMETER;
+
+ /* Read the PE filter info from the PELT-M */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, pe_number, false);
+ pelt = in_be64(p->regs + PHB_IODA_DATA0);
+
+ mve = SETFIELD(IODA_MVT_BUS_VALID, mve,
+ GETFIELD(IODA_PELTM_BUS_VALID, pelt));
+ mve = SETFIELD(IODA_MVT_BUS_NUM, mve,
+ GETFIELD(IODA_PELTM_BUS, pelt));
+ mve = SETFIELD(IODA_MVT_DEV_NUM, mve,
+ GETFIELD(IODA_PELTM_DEV, pelt));
+ mve = SETFIELD(IODA_MVT_FUNC_NUM, mve,
+ GETFIELD(IODA_PELTM_FUNC, pelt));
+ if (pelt & IODA_PELTM_DEV_VALID)
+ mve |= IODA_MVT_DEV_VALID;
+ if (pelt & IODA_PELTM_FUNC_VALID)
+ mve |= IODA_MVT_FUNC_VALID;
+ mve = SETFIELD(IODA_MVT_PE_NUM, mve, pe_number);
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, mve_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, mve);
+
+ /* Update cache */
+ *cache = mve;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_set_mve_enable(struct phb *phb, uint32_t mve_number,
+ uint32_t state)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t mve;
+ uint64_t *cache = &p->mve_cache[mve_number];
+
+ if (mve_number > 255)
+ return OPAL_PARAMETER;
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, mve_number, false);
+ mve = in_be64(p->regs + PHB_IODA_DATA0);
+ if (state)
+ mve |= IODA_MVT_VALID;
+ else
+ mve &= ~IODA_MVT_VALID;
+ out_be64(p->regs + PHB_IODA_DATA0, mve);
+
+ /* Update cache */
+ *cache = mve;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_set_xive_pe(struct phb *phb, uint32_t pe_number,
+ uint32_t xive_num)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ uint64_t xive;
+
+ if (pe_number > 127 || xive_num > 255)
+ return OPAL_PARAMETER;
+
+ /* Update MXIVE cache */
+ xive = p->mxive_cache[xive_num];
+ xive = SETFIELD(IODA_XIVT_PENUM, xive, pe_number);
+ p->mxive_cache[xive_num] = xive;
+
+ /* Update HW */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, xive_num, false);
+ xive = in_be64(p->regs + PHB_IODA_DATA0);
+ xive = SETFIELD(IODA_XIVT_PENUM, xive, pe_number);
+ out_be64(p->regs + PHB_IODA_DATA0, xive);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_get_xive_source(struct phb *phb, uint32_t xive_num,
+ int32_t *interrupt_source_number)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+
+ if (xive_num > 255 || !interrupt_source_number)
+ return OPAL_PARAMETER;
+
+ *interrupt_source_number = (p->buid_msi << 4) | xive_num;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_get_msi_32(struct phb *phb __unused, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint32_t *msi_address, uint32_t *message_data)
+{
+ if (mve_number > 255 || xive_num > 255 || msi_range != 1)
+ return OPAL_PARAMETER;
+
+ *msi_address = 0xffff0000 | (mve_number << 4);
+ *message_data = xive_num;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_get_msi_64(struct phb *phb __unused, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint64_t *msi_address, uint32_t *message_data)
+{
+ if (mve_number > 255 || xive_num > 255 || msi_range != 1)
+ return OPAL_PARAMETER;
+
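+	/* The top nibble of 9 presumably selects the 64-bit MSI
+	 * window; the MVE number sits in bits 48..55, e.g. MVE 5
+	 * yields an address of 0x9005000000000000
+	 */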
+ *msi_address = (9ul << 60) | (((u64)mve_number) << 48);
+ *message_data = xive_num;
+
+ return OPAL_SUCCESS;
+}
+
+static void p7ioc_root_port_init(struct phb *phb, struct pci_device *dev,
+ int ecap, int aercap)
+{
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_SERR_EN | PCI_CFG_CMD_PERR_RESP);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+ val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT |
+ PCICAP_EXP_DEVCTL_UR_REPORT);
+ pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+ /* Mask various unrecoverable errors */
+ if (!aercap) return;
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, &val32);
+ val32 |= (PCIECAP_AER_UE_MASK_POISON_TLP |
+ PCIECAP_AER_UE_MASK_COMPL_TIMEOUT |
+ PCIECAP_AER_UE_MASK_COMPL_ABORT |
+ PCIECAP_AER_UE_MASK_ECRC);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, val32);
+
+ /* Report various unrecoverable errors as fatal errors */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, &val32);
+ val32 |= (PCIECAP_AER_UE_SEVERITY_DLLP |
+ PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
+ PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_UNEXP_COMPL |
+ PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
+ PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
+
+ /* Mask various recoverable errors */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, &val32);
+ val32 |= PCIECAP_AER_CE_MASK_ADV_NONFATAL;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
+
+ /* Enable ECRC check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+
+ /* Enable all error reporting */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, &val32);
+ val32 |= (PCIECAP_AER_RERR_CMD_FE |
+ PCIECAP_AER_RERR_CMD_NFE |
+ PCIECAP_AER_RERR_CMD_CE);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, val32);
+}
+
+static void p7ioc_switch_port_init(struct phb *phb,
+ struct pci_device *dev,
+ int ecap, int aercap)
+{
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking and disable INTx */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_SERR_EN |
+ PCI_CFG_CMD_INTx_DIS);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+	/* Disable parity error response and enable system error */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_BRCTL, &val16);
+ val16 &= ~PCI_CFG_BRCTL_PERR_RESP_EN;
+ val16 |= PCI_CFG_BRCTL_SERR_EN;
+ pci_cfg_write16(phb, bdfn, PCI_CFG_BRCTL, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+ val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT);
+ pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+ /* Unmask all unrecoverable errors */
+ if (!aercap) return;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, 0x0);
+
+ /* Severity of unrecoverable errors */
+ if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT)
+ val32 = (PCIECAP_AER_UE_SEVERITY_DLLP |
+ PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
+ PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
+ PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP |
+ PCIECAP_AER_UE_SEVERITY_INTERNAL);
+ else
+ val32 = (PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_INTERNAL);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
+
+ /* Mask various correctable errors */
+ val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
+
+ /* Enable ECRC generation and disable ECRC check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= PCIECAP_AER_CAPCTL_ECRCG_EN;
+ val32 &= ~PCIECAP_AER_CAPCTL_ECRCC_EN;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+}
+
+static void p7ioc_endpoint_init(struct phb *phb,
+ struct pci_device *dev,
+ int ecap, int aercap)
+{
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_SERR_EN);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+ val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
+ val16 |= (PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT |
+ PCICAP_EXP_DEVCTL_UR_REPORT);
+ pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+ /* Enable ECRC generation and check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+}
+
+static void p7ioc_device_init(struct phb *phb, struct pci_device *dev)
+{
+ int ecap = 0;
+ int aercap = 0;
+
+ /* Figure out AER capability */
+ if (pci_has_cap(dev, PCI_CFG_CAP_ID_EXP, false)) {
+ ecap = pci_cap(dev, PCI_CFG_CAP_ID_EXP, false);
+
+ if (!pci_has_cap(dev, PCIECAP_ID_AER, true)) {
+ aercap = pci_find_ecap(phb, dev->bdfn,
+ PCIECAP_ID_AER, NULL);
+ if (aercap > 0)
+ pci_set_cap(dev, PCIECAP_ID_AER, aercap, true);
+ } else {
+ aercap = pci_cap(dev, PCIECAP_ID_AER, true);
+ }
+ }
+
+ /* Reconfigure the MPS */
+ pci_configure_mps(phb, dev);
+
+ if (dev->dev_type == PCIE_TYPE_ROOT_PORT)
+ p7ioc_root_port_init(phb, dev, ecap, aercap);
+ else if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT ||
+ dev->dev_type == PCIE_TYPE_SWITCH_DNPORT)
+ p7ioc_switch_port_init(phb, dev, ecap, aercap);
+ else
+ p7ioc_endpoint_init(phb, dev, ecap, aercap);
+}
+
+static int64_t p7ioc_pci_reinit(struct phb *phb,
+ uint64_t scope, uint64_t data)
+{
+ struct pci_device *pd;
+ uint16_t bdfn = data;
+
+ if (scope != OPAL_REINIT_PCI_DEV)
+ return OPAL_PARAMETER;
+
+ pd = pci_find_dev(phb, bdfn);
+ if (!pd)
+ return OPAL_PARAMETER;
+
+ p7ioc_device_init(phb, pd);
+ return OPAL_SUCCESS;
+}
+
+static uint8_t p7ioc_choose_bus(struct phb *phb __unused,
+ struct pci_device *bridge,
+ uint8_t candidate, uint8_t *max_bus,
+ bool *use_max)
+{
+ uint8_t m, al;
+ int i;
+
+ /* Bus number selection is nasty on P7IOC. Our EEH HW can only cope
+ * with bus ranges that are naturally aligned powers of two. It also
+ * has "issues" with dealing with more than 32 bus numbers.
+ *
+ * On the other hand we can deal with overlaps to some extent as
+ * the PELT-M entries are ordered.
+ *
+ * We also don't need to bother with the busses between the upstream
+ * and downstream ports of switches.
+ *
+	 * For now we apply this simple mechanism which matches what
+	 * OFW does under OPAL:
+	 *
+	 * - Top level bus (PHB to RC) is 0
+	 * - RC to first device is 1..ff
+	 * - Then going down, a switch gets (N = parent bus, M = parent max)
+	 *       * Upstream bridge is N+1, M, use_max = false
+	 *       * Downstream bridge is the closest power of two from
+	 *         32 down, and uses max
+ *
+ * XXX NOTE: If we have access to HW VPDs, we could know whether
+ * this is a bridge with a single device on it such as IPR and
+ * limit ourselves to a single bus number.
+ */
+
+ /* Default use_max is false (legacy) */
+ *use_max = false;
+
+ /* If we are the root complex or we are not in PCIe land anymore, just
+ * use legacy algorithm
+ */
+ if (!bridge || !pci_has_cap(bridge, PCI_CFG_CAP_ID_EXP, false))
+ return candidate;
+
+ /* Figure out the bridge type */
+ switch(bridge->dev_type) {
+ case PCIE_TYPE_PCIX_TO_PCIE:
+ /* PCI-X to PCIE ... hrm, let's not bother too much with that */
+ return candidate;
+ case PCIE_TYPE_SWITCH_UPPORT:
+ case PCIE_TYPE_ROOT_PORT:
+ /* Upstream port, we use legacy handling as well */
+ return candidate;
+ case PCIE_TYPE_SWITCH_DNPORT:
+ case PCIE_TYPE_PCIE_TO_PCIX:
+ /* That leaves us with the interesting cases that we handle */
+ break;
+ default:
+ /* Should not happen, treat as legacy */
+ prerror("PCI: Device %04x has unsupported type %d in choose_bus\n",
+ bridge->bdfn, bridge->dev_type);
+ return candidate;
+ }
+
+ /* Ok, let's find a power of two that fits, fallback to 1 */
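+	/* Worked example: candidate 5, *max_bus 40. i=5: m=31, al=32,
+	 * but 32+31=63 > 40. i=4: m=15, al=16, 16+15=31 <= 40, so the
+	 * bridge gets the aligned range 16..31.
+	 */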
+ for (i = 5; i >= 0; i--) {
+ m = (1 << i) - 1;
+ al = (candidate + m) & ~m;
+ if (al <= *max_bus && (al + m) <= *max_bus)
+ break;
+ }
+ if (i < 0)
+ return 0;
+ *use_max = true;
+ *max_bus = al + m;
+ return al;
+}
+
+/* p7ioc_phb_init_ioda_cache - Reset the IODA cache values
+ */
+static void p7ioc_phb_init_ioda_cache(struct p7ioc_phb *p)
+{
+ unsigned int i;
+
+ for (i = 0; i < 8; i++)
+ p->lxive_cache[i] = SETFIELD(IODA_XIVT_PRIORITY, 0ull, 0xff);
+ for (i = 0; i < 256; i++) {
+ p->mxive_cache[i] = SETFIELD(IODA_XIVT_PRIORITY, 0ull, 0xff);
+ p->mve_cache[i] = 0;
+ }
+ for (i = 0; i < 16; i++)
+ p->m64b_cache[i] = 0;
+
+	/*
+	 * Since there is only one root port under the PHB, we make
+	 * all PELTM entries except the last one invalid by setting
+	 * their RID to 00:00.1. The last entry encompasses all RIDs.
+	 */
+ for (i = 0; i < 127; i++)
+ p->peltm_cache[i] = 0x0001f80000000000;
+ p->peltm_cache[127] = 0x0ul;
+
+ for (i = 0; i < 128; i++) {
+ p->peltv_lo_cache[i] = 0;
+ p->peltv_hi_cache[i] = 0;
+ p->tve_lo_cache[i] = 0;
+ p->tve_hi_cache[i] = 0;
+ p->iod_cache[i] = 0;
+ p->m32d_cache[i] = 0;
+ p->m64d_cache[i] = 0;
+ }
+}
+
+/* p7ioc_phb_ioda_reset - Reset the IODA tables
+ *
+ * @purge: If true, the cache is cleared and the cleared values
+ * are applied to HW. If false, the cached values are
+ * applied to HW
+ *
+ * This resets the IODA tables in the PHB. It is called at
+ * initialization time, on PHB reset, and can be called
+ * explicitly from OPAL
+ */
+static int64_t p7ioc_ioda_reset(struct phb *phb, bool purge)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ unsigned int i;
+ uint64_t reg64;
+ uint64_t data64, data64_hi;
+ uint8_t prio;
+ uint16_t server;
+ uint64_t m_server, m_prio;
+
+ /* If the "purge" argument is set, we clear the table cache */
+ if (purge)
+ p7ioc_phb_init_ioda_cache(p);
+
+ /* Init_18..19: Setup the HRT
+ *
+ * XXX NOTE: I still don't completely get that HRT business so
+	 * I'll just mimic BML and put the PHB number + 1 in there
+ */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_HRT, 0, true);
+ out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
+ out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
+ out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
+ out_be64(p->regs + PHB_IODA_DATA0, p->index + 1);
+
+ /* Init_20..21: Cleanup the LXIVT
+ *
+ * We set the priority to FF (masked) and clear everything
+ * else. That means we leave the HRT index to 0 which is
+ * going to remain unmodified... for now.
+ */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_LXIVT, 0, true);
+ for (i = 0; i < 8; i++) {
+ data64 = p->lxive_cache[i];
+ server = GETFIELD(IODA_XIVT_SERVER, data64);
+ prio = GETFIELD(IODA_XIVT_PRIORITY, data64);
+
+ /* Now we mangle the server and priority */
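+		/* The top 3 bits of the priority field are hijacked to
+		 * extend the server number (see p7ioc_msi_set_xive):
+		 * e.g. server 0x1f, prio 0x40 becomes m_server 3,
+		 * m_prio 0xe8
+		 */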
+ if (prio == 0xff) {
+ m_server = 0;
+ m_prio = 0xff;
+ } else {
+ m_server = server >> 3;
+ m_prio = (prio >> 3) | ((server & 7) << 5);
+ }
+
+ data64 = SETFIELD(IODA_XIVT_SERVER, data64, m_server);
+ data64 = SETFIELD(IODA_XIVT_PRIORITY, data64, m_prio);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_22..23: Cleanup the MXIVT
+ *
+ * We set the priority to FF (masked) and clear everything
+ * else. That means we leave the HRT index to 0 which is
+ * going to remain unmodified... for now.
+ */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, 0, true);
+ for (i = 0; i < 256; i++) {
+ data64 = p->mxive_cache[i];
+ server = GETFIELD(IODA_XIVT_SERVER, data64);
+ prio = GETFIELD(IODA_XIVT_PRIORITY, data64);
+
+ /* Now we mangle the server and priority */
+ if (prio == 0xff) {
+ m_server = 0;
+ m_prio = 0xff;
+ } else {
+ m_server = server >> 3;
+ m_prio = (prio >> 3) | ((server & 7) << 5);
+ }
+
+ data64 = SETFIELD(IODA_XIVT_SERVER, data64, m_server);
+ data64 = SETFIELD(IODA_XIVT_PRIORITY, data64, m_prio);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_24..25: Cleanup the MVT */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MVT, 0, true);
+ for (i = 0; i < 256; i++) {
+ data64 = p->mve_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_26..27: Cleanup the PELTM
+ *
+ * A completely clear PELTM should make everything match PE 0
+ */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTM, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->peltm_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_28..30: Cleanup the PELTV */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PELTV, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->peltv_lo_cache[i];
+ data64_hi = p->peltv_hi_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA1, data64_hi);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_31..33: Cleanup the TVT */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_TVT, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->tve_lo_cache[i];
+ data64_hi = p->tve_hi_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA1, data64_hi);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+	/* Init_34..35: Cleanup the M64BT
+	 *
+	 * We don't enable M64 BARs by default. However, in the
+	 * future we shouldn't purge the HW and the cache for them
+	 * here.
+	 */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, 0, true);
+ for (i = 0; i < 16; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ /* Init_36..37: Cleanup the IODT */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_IODT, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->iod_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_38..39: Cleanup the M32DT */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M32DT, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->m32d_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+	/* Init_40..41: Restore the M64BT from cache and cleanup the M64DT */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M64BT, 0, true);
+ for (i = 0; i < 16; i++) {
+ data64 = p->m64b_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_M64DT, 0, true);
+ for (i = 0; i < 127; i++) {
+ data64 = p->m64d_cache[i];
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Clear up the TCE cache */
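+	/* Toggling the 64B TCE enable bit appears to invalidate the
+	 * TCE cache; the final read-back ensures the writes have
+	 * reached the PHB
+	 */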
+ reg64 = in_be64(p->regs + PHB_PHB2_CONFIG);
+ reg64 &= ~PHB_PHB2C_64B_TCE_EN;
+ out_be64(p->regs + PHB_PHB2_CONFIG, reg64);
+ reg64 |= PHB_PHB2C_64B_TCE_EN;
+ out_be64(p->regs + PHB_PHB2_CONFIG, reg64);
+ in_be64(p->regs + PHB_PHB2_CONFIG);
+
+ /* Clear PEST & PEEV */
+ for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
+ uint64_t pesta, pestb;
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTA, i, false);
+ pesta = in_be64(p->regs + PHB_IODA_DATA0);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PESTB, i, false);
+ pestb = in_be64(p->regs + PHB_IODA_DATA0);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ if ((pesta & IODA_PESTA_MMIO_FROZEN) ||
+ (pestb & IODA_PESTB_DMA_STOPPED))
+ PHBDBG(p, "Frozen PE#%d (%s - %s)\n",
+ i, (pestb & IODA_PESTB_DMA_STOPPED) ? "DMA" : "",
+ (pesta & IODA_PESTA_MMIO_FROZEN) ? "MMIO" : "");
+ }
+
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
+ for (i = 0; i < 2; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ return OPAL_SUCCESS;
+}
+
+static const struct phb_ops p7ioc_phb_ops = {
+ .lock = p7ioc_phb_lock,
+ .unlock = p7ioc_phb_unlock,
+ .cfg_read8 = p7ioc_pcicfg_read8,
+ .cfg_read16 = p7ioc_pcicfg_read16,
+ .cfg_read32 = p7ioc_pcicfg_read32,
+ .cfg_write8 = p7ioc_pcicfg_write8,
+ .cfg_write16 = p7ioc_pcicfg_write16,
+ .cfg_write32 = p7ioc_pcicfg_write32,
+ .choose_bus = p7ioc_choose_bus,
+ .device_init = p7ioc_device_init,
+ .pci_reinit = p7ioc_pci_reinit,
+ .eeh_freeze_status = p7ioc_eeh_freeze_status,
+ .eeh_freeze_clear = p7ioc_eeh_freeze_clear,
+ .get_diag_data = NULL,
+ .get_diag_data2 = p7ioc_get_diag_data,
+ .next_error = p7ioc_eeh_next_error,
+ .phb_mmio_enable = p7ioc_phb_mmio_enable,
+ .set_phb_mem_window = p7ioc_set_phb_mem_window,
+ .map_pe_mmio_window = p7ioc_map_pe_mmio_window,
+ .set_pe = p7ioc_set_pe,
+ .set_peltv = p7ioc_set_peltv,
+ .map_pe_dma_window = p7ioc_map_pe_dma_window,
+ .map_pe_dma_window_real = p7ioc_map_pe_dma_window_real,
+ .set_mve = p7ioc_set_mve,
+ .set_mve_enable = p7ioc_set_mve_enable,
+ .set_xive_pe = p7ioc_set_xive_pe,
+ .get_xive_source = p7ioc_get_xive_source,
+ .get_msi_32 = p7ioc_get_msi_32,
+ .get_msi_64 = p7ioc_get_msi_64,
+ .ioda_reset = p7ioc_ioda_reset,
+ .presence_detect = p7ioc_presence_detect,
+ .link_state = p7ioc_link_state,
+ .power_state = p7ioc_power_state,
+ .slot_power_off = p7ioc_slot_power_off,
+ .slot_power_on = p7ioc_slot_power_on,
+ .complete_reset = p7ioc_complete_reset,
+ .hot_reset = p7ioc_hot_reset,
+ .fundamental_reset = p7ioc_freset,
+ .poll = p7ioc_poll,
+};
+
+/* p7ioc_msi_get_xive - Interrupt control from OPAL */
+static int64_t p7ioc_msi_get_xive(void *data, uint32_t isn,
+ uint16_t *server, uint8_t *prio)
+{
+ struct p7ioc_phb *p = data;
+ uint32_t irq, fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive;
+
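+	/* The MSI window spans 0x10 consecutive BUIDs; at 16 ISNs per
+	 * BUID that matches the 256 entries of mxive_cache.
+	 */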
+ if (fbuid < p->buid_msi || fbuid >= (p->buid_msi + 0x10))
+ return OPAL_PARAMETER;
+
+ irq = isn & 0xff;
+ xive = p->mxive_cache[irq];
+
+ *server = GETFIELD(IODA_XIVT_SERVER, xive);
+ *prio = GETFIELD(IODA_XIVT_PRIORITY, xive);
+
+ return OPAL_SUCCESS;
+}
+
+/* p7ioc_msi_set_xive - Interrupt control from OPAL */
+static int64_t p7ioc_msi_set_xive(void *data, uint32_t isn,
+ uint16_t server, uint8_t prio)
+{
+ struct p7ioc_phb *p = data;
+ uint32_t irq, fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive, m_server, m_prio;
+
+ if (fbuid < p->buid_msi || fbuid >= (p->buid_msi + 0x10))
+ return OPAL_PARAMETER;
+
+ /* We cache the arguments because we have to mangle
+	 * them in order to hijack 3 bits of priority to extend
+ * the server number
+ */
+ irq = isn & 0xff;
+ xive = p->mxive_cache[irq];
+ xive = SETFIELD(IODA_XIVT_SERVER, xive, server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, prio);
+ p->mxive_cache[irq] = xive;
+
+ /* Now we mangle the server and priority */
+ if (prio == 0xff) {
+ m_server = 0;
+ m_prio = 0xff;
+ } else {
+ m_server = server >> 3;
+ m_prio = (prio >> 3) | ((server & 7) << 5);
+ }
+
+ /* We use HRT entry 0 always for now */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_MXIVT, irq, false);
+ xive = in_be64(p->regs + PHB_IODA_DATA0);
+ xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
+ out_be64(p->regs + PHB_IODA_DATA0, xive);
+
+ return OPAL_SUCCESS;
+}
+
+/* p7ioc_lsi_get_xive - Interrupt control from OPAL */
+static int64_t p7ioc_lsi_get_xive(void *data, uint32_t isn,
+ uint16_t *server, uint8_t *prio)
+{
+ struct p7ioc_phb *p = data;
+ uint32_t irq = (isn & 0x7);
+ uint32_t fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive;
+
+ if (fbuid != p->buid_lsi)
+ return OPAL_PARAMETER;
+
+ xive = p->lxive_cache[irq];
+ *server = GETFIELD(IODA_XIVT_SERVER, xive);
+ *prio = GETFIELD(IODA_XIVT_PRIORITY, xive);
+
+ return OPAL_SUCCESS;
+}
+
+/* p7ioc_lsi_set_xive - Interrupt control from OPAL */
+static int64_t p7ioc_lsi_set_xive(void *data, uint32_t isn,
+ uint16_t server, uint8_t prio)
+{
+ struct p7ioc_phb *p = data;
+ uint32_t irq = (isn & 0x7);
+ uint32_t fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive, m_server, m_prio;
+
+ if (fbuid != p->buid_lsi)
+ return OPAL_PARAMETER;
+
+ xive = SETFIELD(IODA_XIVT_SERVER, 0ull, server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, prio);
+
+ /*
+ * We cache the arguments because we have to mangle
+	 * them in order to hijack 3 bits of priority to extend
+ * the server number
+ */
+ p->lxive_cache[irq] = xive;
+
+ /* Now we mangle the server and priority */
+ if (prio == 0xff) {
+ m_server = 0;
+ m_prio = 0xff;
+ } else {
+ m_server = server >> 3;
+ m_prio = (prio >> 3) | ((server & 7) << 5);
+ }
+
+ /* We use HRT entry 0 always for now */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_LXIVT, irq, false);
+ xive = in_be64(p->regs + PHB_IODA_DATA0);
+ xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
+ out_be64(p->regs + PHB_IODA_DATA0, xive);
+
+ return OPAL_SUCCESS;
+}
+
+static void p7ioc_phb_err_interrupt(void *data, uint32_t isn)
+{
+ struct p7ioc_phb *p = data;
+ uint64_t peev0, peev1;
+
+ PHBDBG(p, "Got interrupt 0x%04x\n", isn);
+
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR, OPAL_EVENT_PCI_ERROR);
+
+ /* If the PHB is broken, go away */
+ if (p->state == P7IOC_PHB_STATE_BROKEN)
+ return;
+
+ /*
+	 * Check whether the PHB is fenced; if so, update the fence
+	 * state and return, since the ER error is discarded at this
+	 * point.
+ */
+ lock(&p->lock);
+ if (p7ioc_phb_fenced(p)) {
+ p->state = P7IOC_PHB_STATE_FENCED;
+ PHBERR(p, "ER error ignored, PHB fenced\n");
+ unlock(&p->lock);
+ return;
+ }
+
+ /*
+ * If we already had pending errors, which might be
+ * moved from IOC, then we needn't check PEEV to avoid
+ * overwriting the errors from IOC.
+ */
+ if (!p7ioc_phb_err_pending(p)) {
+ unlock(&p->lock);
+ return;
+ }
+
+ /*
+ * We don't have pending errors from IOC, it's safe
+ * to check PEEV for frozen PEs.
+ */
+ p7ioc_phb_ioda_sel(p, IODA_TBL_PEEV, 0, true);
+ peev0 = in_be64(p->regs + PHB_IODA_DATA0);
+ peev1 = in_be64(p->regs + PHB_IODA_DATA0);
+ if (peev0 || peev1) {
+ p->err.err_src = P7IOC_ERR_SRC_PHB0 + p->index;
+ p->err.err_class = P7IOC_ERR_CLASS_ER;
+ p->err.err_bit = 0;
+ p7ioc_phb_set_err_pending(p, true);
+ }
+ unlock(&p->lock);
+}
+
+/* MSIs (OS owned) */
+static const struct irq_source_ops p7ioc_msi_irq_ops = {
+ .get_xive = p7ioc_msi_get_xive,
+ .set_xive = p7ioc_msi_set_xive,
+};
+
+/* LSIs (OS owned) */
+static const struct irq_source_ops p7ioc_lsi_irq_ops = {
+ .get_xive = p7ioc_lsi_get_xive,
+ .set_xive = p7ioc_lsi_set_xive,
+};
+
+/* PHB Errors (Ski owned) */
+static const struct irq_source_ops p7ioc_phb_err_irq_ops = {
+ .get_xive = p7ioc_lsi_get_xive,
+ .set_xive = p7ioc_lsi_set_xive,
+ .interrupt = p7ioc_phb_err_interrupt,
+};
+
+static void p7ioc_pcie_add_node(struct p7ioc_phb *p)
+{
+ uint64_t reg[2], iob, m32b, m64b, tkill;
+ uint32_t lsibase, icsp = get_ics_phandle();
+ struct dt_node *np;
+
+ reg[0] = cleanup_addr((uint64_t)p->regs);
+ reg[1] = 0x100000;
+
+ np = dt_new_addr(p->ioc->dt_node, "pciex", reg[0]);
+ if (!np)
+ return;
+
+ p->phb.dt_node = np;
+ dt_add_property_strings(np, "compatible", "ibm,p7ioc-pciex",
+ "ibm,ioda-phb");
+ dt_add_property_strings(np, "device_type", "pciex");
+ dt_add_property(np, "reg", reg, sizeof(reg));
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+ dt_add_property_cells(np, "bus-range", 0, 0xff);
+ dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
+ dt_add_property_cells(np, "interrupt-parent", icsp);
+ /* XXX FIXME: add slot-name */
+ //dt_property_cell("bus-width", 8); /* Figure it out from VPD ? */
+
+ /* "ranges", we only expose IO and M32
+ *
+	 * Note: The kernel expects us to have chopped off 64k from the
+	 * M32 size (for the 32-bit MSIs). If we don't do that, it will
+	 * get confused, so OPAL does the chopping here.
+ */
+ iob = cleanup_addr(p->io_base);
+ m32b = cleanup_addr(p->m32_base + M32_PCI_START);
+ dt_add_property_cells(np, "ranges",
+ /* IO space */
+ 0x01000000, 0x00000000, 0x00000000,
+ hi32(iob), lo32(iob), 0, PHB_IO_SIZE,
+ /* M32 space */
+ 0x02000000, 0x00000000, M32_PCI_START,
+			      hi32(m32b), lo32(m32b), 0, M32_PCI_SIZE - 0x10000);
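+	/* Each "ranges" entry follows the standard OF PCI binding: a
+	 * 3-cell PCI address whose first cell encodes the space
+	 * (0x01000000 = I/O, 0x02000000 = 32-bit memory), a 2-cell
+	 * parent address, then a 2-cell size.
+	 */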
+
+ /* XXX FIXME: add opal-memwin32, dmawins, etc... */
+ m64b = cleanup_addr(p->m64_base);
+ dt_add_property_cells(np, "ibm,opal-m64-window",
+ hi32(m64b), lo32(m64b),
+ hi32(m64b), lo32(m64b),
+ hi32(PHB_M64_SIZE), lo32(PHB_M64_SIZE));
+ dt_add_property_cells(np, "ibm,opal-msi-ports", 256);
+ dt_add_property_cells(np, "ibm,opal-num-pes", 128);
+ dt_add_property_cells(np, "ibm,opal-reserved-pe", 127);
+ dt_add_property_cells(np, "ibm,opal-msi-ranges",
+ p->buid_msi << 4, 0x100);
+ tkill = reg[0] + PHB_TCE_KILL;
+ dt_add_property_cells(np, "ibm,opal-tce-kill",
+ hi32(tkill), lo32(tkill));
+
+ /* Add associativity properties */
+ add_chip_dev_associativity(np);
+
+ /* The interrupt maps will be generated in the RC node by the
+ * PCI code based on the content of this structure:
+ */
+ lsibase = p->buid_lsi << 4;
+ p->phb.lstate.int_size = 1;
+ p->phb.lstate.int_val[0][0] = lsibase + PHB_LSI_PCIE_INTA;
+ p->phb.lstate.int_val[1][0] = lsibase + PHB_LSI_PCIE_INTB;
+ p->phb.lstate.int_val[2][0] = lsibase + PHB_LSI_PCIE_INTC;
+ p->phb.lstate.int_val[3][0] = lsibase + PHB_LSI_PCIE_INTD;
+ p->phb.lstate.int_parent[0] = icsp;
+ p->phb.lstate.int_parent[1] = icsp;
+ p->phb.lstate.int_parent[2] = icsp;
+ p->phb.lstate.int_parent[3] = icsp;
+}
+
+/* p7ioc_phb_setup - Setup a p7ioc_phb data structure
+ *
+ * WARNING: This is called before the AIB register routing is
+ * established. If this wants to access PHB registers, it must
+ * use the ASB hard coded variant (slower)
+ */
+void p7ioc_phb_setup(struct p7ioc *ioc, uint8_t index)
+{
+ struct p7ioc_phb *p = &ioc->phbs[index];
+ unsigned int buid_base = ioc->buid_base + PHBn_BUID_BASE(index);
+
+ p->index = index;
+ p->ioc = ioc;
+ p->gen = 2; /* Operate in Gen2 mode by default */
+ p->phb.ops = &p7ioc_phb_ops;
+ p->phb.phb_type = phb_type_pcie_v2;
+ p->regs_asb = ioc->regs + PHBn_ASB_BASE(index);
+ p->regs = ioc->regs + PHBn_AIB_BASE(index);
+ p->buid_lsi = buid_base + PHB_BUID_LSI_OFFSET;
+ p->buid_msi = buid_base + PHB_BUID_MSI_OFFSET;
+ p->io_base = ioc->mmio1_win_start + PHBn_IO_BASE(index);
+ p->m32_base = ioc->mmio2_win_start + PHBn_M32_BASE(index);
+ p->m64_base = ioc->mmio2_win_start + PHBn_M64_BASE(index);
+ p->state = P7IOC_PHB_STATE_UNINITIALIZED;
+ p->phb.scan_map = 0x1; /* Only device 0 to scan */
+
+ /* Find P7IOC base location code in IOC */
+ p->phb.base_loc_code = dt_prop_get_def(ioc->dt_node,
+ "ibm,io-base-loc-code", NULL);
+ if (!p->phb.base_loc_code)
+ prerror("P7IOC: Base location code not found !\n");
+
+ /* Create device node for PHB */
+ p7ioc_pcie_add_node(p);
+
+ /* Register OS interrupt sources */
+ register_irq_source(&p7ioc_msi_irq_ops, p, p->buid_msi << 4, 256);
+ register_irq_source(&p7ioc_lsi_irq_ops, p, p->buid_lsi << 4, 4);
+
+ /* Register internal interrupt source (LSI 7) */
+ register_irq_source(&p7ioc_phb_err_irq_ops, p,
+ (p->buid_lsi << 4) + PHB_LSI_PCIE_ERROR, 1);
+
+ /* Initialize IODA table caches */
+ p7ioc_phb_init_ioda_cache(p);
+
+ /* We register the PHB before we initialize it so we
+ * get a useful OPAL ID for it
+ */
+ pci_register_phb(&p->phb);
+
+ /* Platform additional setup */
+ if (platform.pci_setup_phb)
+ platform.pci_setup_phb(&p->phb, p->index);
+}
+
+static bool p7ioc_phb_wait_dlp_reset(struct p7ioc_phb *p)
+{
+ unsigned int i;
+ uint64_t val;
+
+ /*
+ * Firmware cannot access the UTL core regs or PCI config space
+ * until the cores are out of DL_PGRESET.
+ * DL_PGRESET should be polled until it is inactive with a value
+ * of '0'. The recommended polling frequency is once every 1ms.
+ * Firmware should poll at least 200 attempts before giving up.
+ * MMIO Stores to the link are silently dropped by the UTL core if
+ * the link is down.
+ * MMIO Loads to the link will be dropped by the UTL core and will
+ * eventually time-out and will return an all ones response if the
+ * link is down.
+ */
+#define DLP_RESET_ATTEMPTS 400
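+	/* 400 polls at 1ms each gives a 400ms budget, double the
+	 * 200-attempt minimum recommended above.
+	 */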
+
+ printf("P7IOC: Waiting for DLP PG reset to complete...\n");
+ for (i = 0; i < DLP_RESET_ATTEMPTS; i++) {
+ val = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (!(val & PHB_PCIE_DLP_TC_DL_PGRESET))
+ break;
+ time_wait_ms(1);
+ }
+ if (val & PHB_PCIE_DLP_TC_DL_PGRESET) {
+ PHBERR(p, "Timeout waiting for DLP PG reset !\n");
+ return false;
+ }
+ return true;
+}
+
+/* p7ioc_phb_init_rc_cfg - Initialize the Root Complex config space
+ */
+static bool p7ioc_phb_init_rc_cfg(struct p7ioc_phb *p)
+{
+ int64_t ecap, aercap;
+
+ /* XXX Handle errors ? */
+
+ /* Init_51..51:
+ *
+ * Set primary bus to 0, secondary to 1 and subordinate to 0xff
+ */
+ p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_PRIMARY_BUS, 0x00ff0100);
+
+ /* Init_52..57
+ *
+ * IO and Memory base & limits are set to base > limit, which
+ * allows all inbounds.
+ *
+ * XXX This has the potential of confusing the OS which might
+ * think that nothing is forwarded downstream. We probably need
+ * to fix this to match the IO and M32 PHB windows
+ */
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_IO_BASE, 0x0010);
+ p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_MEM_BASE, 0x00000010);
+ p7ioc_pcicfg_write32(&p->phb, 0, PCI_CFG_PREF_MEM_BASE, 0x00000010);
+
+ /* Init_58..: Setup bridge control to enable forwarding of CORR, FATAL,
+ * and NONFATAL errors
+ */
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, PCI_CFG_BRCTL_SERR_EN);
+
+ /* Init_60..61
+ *
+ * PCIE Device control/status, enable error reporting, disable relaxed
+ * ordering, set MPS to 128 (see note), clear errors.
+ *
+ * Note: The doc recommends to set MPS to 4K. This has proved to have
+	 * some issues as it requires specific clamping of MRSS on devices and
+ * we've found devices in the field that misbehave when doing that.
+ *
+ * We currently leave it all to 128 bytes (minimum setting) at init
+ * time. The generic PCIe probing later on might apply a different
+ * value, or the kernel will, but we play it safe at early init
+ */
+ if (p->ecap <= 0) {
+ ecap = pci_find_cap(&p->phb, 0, PCI_CFG_CAP_ID_EXP);
+ if (ecap < 0) {
+ PHBERR(p, "Can't locate PCI-E capability\n");
+ return false;
+ }
+ p->ecap = ecap;
+ } else {
+ ecap = p->ecap;
+ }
+
+ p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVSTAT,
+ PCICAP_EXP_DEVSTAT_CE |
+ PCICAP_EXP_DEVSTAT_NFE |
+ PCICAP_EXP_DEVSTAT_FE |
+ PCICAP_EXP_DEVSTAT_UE);
+
+ p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVCTL,
+ PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT |
+ PCICAP_EXP_DEVCTL_UR_REPORT |
+ SETFIELD(PCICAP_EXP_DEVCTL_MPS, 0, PCIE_MPS_128B));
+
+ /* Init_62..63
+ *
+ * Root Control Register. Enable error reporting
+ *
+ * Note: Added CRS visibility.
+ */
+ p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_RC,
+ PCICAP_EXP_RC_SYSERR_ON_CE |
+ PCICAP_EXP_RC_SYSERR_ON_NFE |
+ PCICAP_EXP_RC_SYSERR_ON_FE |
+ PCICAP_EXP_RC_CRS_VISIBLE);
+
+ /* Init_64..65
+ *
+ * Device Control 2. Enable ARI fwd, set timer
+ */
+ p7ioc_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DCTL2,
+ SETFIELD(PCICAP_EXP_DCTL2_CMPTOUT, 0, 2) |
+ PCICAP_EXP_DCTL2_ARI_FWD);
+
+ /* Init_66..81
+ *
+ * AER inits
+ */
+ aercap = pci_find_ecap(&p->phb, 0, PCIECAP_ID_AER, NULL);
+ if (aercap < 0) {
+ /* Shouldn't happen */
+ PHBERR(p, "Failed to locate AER capability in bridge\n");
+ return false;
+ }
+ p->aercap = aercap;
+
+ /* Clear all UE status */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_STATUS,
+ 0xffffffff);
+ /* Disable some error reporting as per the P7IOC spec */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_MASK,
+ PCIECAP_AER_UE_POISON_TLP |
+ PCIECAP_AER_UE_COMPL_TIMEOUT |
+ PCIECAP_AER_UE_COMPL_ABORT |
+ PCIECAP_AER_UE_ECRC);
+ /* Report some errors as fatal */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_SEVERITY,
+ PCIECAP_AER_UE_DLP |
+ PCIECAP_AER_UE_SURPRISE_DOWN |
+ PCIECAP_AER_UE_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_UNEXP_COMPL |
+ PCIECAP_AER_UE_RECV_OVFLOW |
+ PCIECAP_AER_UE_MALFORMED_TLP);
+ /* Clear all CE status */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_STATUS,
+ 0xffffffff);
+ /* Disable some error reporting as per the P7IOC spec */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_MASK,
+ PCIECAP_AER_CE_ADV_NONFATAL);
+ /* Enable ECRC generation & checking */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CAPCTL,
+ PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ /* Enable reporting in root error control */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_CMD,
+ PCIECAP_AER_RERR_CMD_FE |
+ PCIECAP_AER_RERR_CMD_NFE |
+ PCIECAP_AER_RERR_CMD_CE);
+ /* Clear root error status */
+ p7ioc_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_STA,
+ 0xffffffff);
+
+ return true;
+}
+
+static void p7ioc_phb_init_utl(struct p7ioc_phb *p)
+{
+ /* Init_82..84: Clear spurious errors and assign errors to the
+ * right "interrupt" signal
+ */
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_ERR_SEVERITY, 0x0000000000000000);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_IRQ_EN, 0xac80000000000000);
+
+ /* Init_85..89: Setup buffer allocations */
+ out_be64(p->regs + UTL_OUT_POST_DAT_BUF_ALLOC, 0x0400000000000000);
+ out_be64(p->regs + UTL_IN_POST_HDR_BUF_ALLOC, 0x1000000000000000);
+ out_be64(p->regs + UTL_IN_POST_DAT_BUF_ALLOC, 0x4000000000000000);
+ out_be64(p->regs + UTL_PCIE_TAGS_ALLOC, 0x0800000000000000);
+ out_be64(p->regs + UTL_GBIF_READ_TAGS_ALLOC, 0x0800000000000000);
+
+ /* Init_90: PCI Express port control */
+ out_be64(p->regs + UTL_PCIE_PORT_CONTROL, 0x8480000000000000);
+
+ /* Init_91..93: Clean & setup port errors */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xff7fffffffffffff);
+ out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV, 0x00e0000000000000);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0x7e65000000000000);
+
+ /* Init_94 : Cleanup RC errors */
+ out_be64(p->regs + UTL_RC_STATUS, 0xffffffffffffffff);
+}
+
+static void p7ioc_phb_init_errors(struct p7ioc_phb *p)
+{
+ /* Init_98: LEM Error Mask : Temporarily disable error interrupts */
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0xffffffffffffffff);
+
+ /* Init_99..107: Configure main error traps & clear old state */
+ out_be64(p->regs + PHB_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_LEM_ENABLE, 0xffffffffefffffff);
+ out_be64(p->regs + PHB_ERR_FREEZE_ENABLE, 0x0000000061c00000);
+ out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE, 0xffffffc58c000000);
+ out_be64(p->regs + PHB_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_108_116: Configure MMIO error traps & clear old state */
+ out_be64(p->regs + PHB_OUT_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_LEM_ENABLE, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_OUT_ERR_FREEZE_ENABLE, 0x0000430803000000);
+ out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE, 0x9df3bc00f0f0700f);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_117_125: Configure DMA_A error traps & clear old state */
+ out_be64(p->regs + PHB_INA_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_LEM_ENABLE, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INA_ERR_FREEZE_ENABLE, 0xc00003ff01006000);
+ out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE, 0x3fff50007e559fd8);
+ out_be64(p->regs + PHB_INA_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_126_134: Configure DMA_B error traps & clear old state */
+ out_be64(p->regs + PHB_INB_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0x18ff80ffff7f0000);
+ out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_135..138: Cleanup & configure LEM */
+ out_be64(p->regs + PHB_LEM_FIR_ACCUM, 0x0000000000000000);
+ out_be64(p->regs + PHB_LEM_ACTION0, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_LEM_ACTION1, 0x0000000000000000);
+ out_be64(p->regs + PHB_LEM_WOF, 0x0000000000000000);
+}
+
+/* p7ioc_phb_init - Initialize the PHB hardware
+ *
+ * This is currently only called at boot time. It will eventually
+ * be called at runtime, for example in some cases of error recovery
+ * after a PHB reset in which case we might need locks etc...
+ */
+int64_t p7ioc_phb_init(struct p7ioc_phb *p)
+{
+ uint64_t val;
+
+ PHBDBG(p, "Initializing PHB %d...\n", p->index);
+
+ p->state = P7IOC_PHB_STATE_INITIALIZING;
+
+ /* For some reason, the doc wants us to read the version
+	 * register, so let's do it. We should probably check that
+ * the value makes sense...
+ */
+ val = in_be64(p->regs_asb + PHB_VERSION);
+
+ PHBDBG(p, "Version reg: %llx\n", val);
+
+ /*
+ * Configure AIB operations
+ *
+ * This register maps upbound commands to AIB channels.
+ * DMA Write=0, DMA Read=2, MMIO Load Response=1,
+ * Interrupt Request=1, TCE Read=3.
+ */
+ /* Init_1: AIB TX Channel Mapping */
+ out_be64(p->regs_asb + PHB_AIB_TX_CHAN_MAPPING, 0x0211300000000000);
+
+ /*
+ * This group of steps initializes the AIB RX credits for
+	 * the CI block's port that is attached to this PHB.
+ *
+ * Channel 0 (Dkill): 32 command credits, 0 data credits
+ * (effectively infinite command credits)
+ * Channel 1 (DMA/TCE Read Responses): 32 command credits, 32 data
+ * credits (effectively infinite
+ * command and data credits)
+ * Channel 2 (Interrupt Reissue/Return): 32 command, 0 data credits
+ * (effectively infinite
+ * command credits)
+ * Channel 3 (MMIO Load/Stores, EOIs): 1 command, 1 data credit
+ */
+
+ /* Init_2: AIB RX Command Credit */
+ out_be64(p->regs_asb + PHB_AIB_RX_CMD_CRED, 0x0020002000200001);
+ /* Init_3: AIB RX Data Credit */
+ out_be64(p->regs_asb + PHB_AIB_RX_DATA_CRED, 0x0000002000000001);
+ /* Init_4: AXIB RX Credit Init Timer */
+ out_be64(p->regs_asb + PHB_AIB_RX_CRED_INIT_TIMER, 0xFF00000000000000);
+
+ /*
+ * Enable all 32 AIB and TCE tags.
+ *
+ * AIB tags are used for DMA read requests.
+ * TCE tags are used for every internal transaction as well as TCE
+ * read requests.
+ */
+
+ /* Init_5: PHB - AIB Tag Enable Register */
+ out_be64(p->regs_asb + PHB_AIB_TAG_ENABLE, 0xFFFFFFFF00000000);
+	/* Init_6: PHB - TCE Tag Enable Register */
+ out_be64(p->regs_asb + PHB_TCE_TAG_ENABLE, 0xFFFFFFFF00000000);
+
+ /* Init_7: PCIE - System Configuration Register
+ *
+ * This is the default value out of reset. This register can be
+ * modified to change the following fields if needed:
+ *
+ * bits 04:09 - SYS_EC0C_MAXLINKWIDTH[5:0]
+ * The default link width is x8. This can be reduced
+ * to x1 or x4, if needed.
+ *
+ * bits 10:12 - SYS_EC04_MAX_PAYLOAD[2:0]
+ *
+ * The default max payload size is 4KB. This can be
+ * reduced to the allowed ranges from 128B
+ * to 2KB if needed.
+ */
+ out_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG, 0x422800FC20000000);
+
+ /* Init_8: PHB - PCI-E Reset Register
+ *
+ * This will deassert reset for the PCI-E cores, including the
+ * PHY and HSS macros. The TLDLP core will begin link training
+ * shortly after this register is written.
+ * This will also assert reset for the internal scan-only error
+ * report macros. The error report macro reset will be deasserted
+ * in a later step.
+ * Firmware will verify in a later step whether the PCI-E link
+ * has been established.
+ *
+ * NOTE: We perform a PERST at the end of the init sequence so
+ * we could probably skip that link training.
+ */
+ out_be64(p->regs + PHB_RESET, 0xE800000000000000);
+
+ /* Init_9: BUID
+ *
+	 * Only the top 5 bits of the MSI field are implemented, the bottom
+ * are always 0. Our buid_msi value should also be a multiple of
+ * 16 so it should all fit well
+ */
+ val = SETFIELD(PHB_BUID_LSI, 0ul, P7_BUID_BASE(p->buid_lsi));
+ val |= SETFIELD(PHB_BUID_MSI, 0ul, P7_BUID_BASE(p->buid_msi));
+ out_be64(p->regs + PHB_BUID, val);
+
+ /* Init_10..12: IO Space */
+ out_be64(p->regs + PHB_IO_BASE_ADDR, p->io_base);
+ out_be64(p->regs + PHB_IO_BASE_MASK, ~(PHB_IO_SIZE - 1));
+ out_be64(p->regs + PHB_IO_START_ADDR, 0);
+
+ /* Init_13..15: M32 Space */
+ out_be64(p->regs + PHB_M32_BASE_ADDR, p->m32_base + M32_PCI_START);
+ out_be64(p->regs + PHB_M32_BASE_MASK, ~(M32_PCI_SIZE - 1));
+ out_be64(p->regs + PHB_M32_START_ADDR, M32_PCI_START);
+
+ /* Init_16: PCIE-E Outbound Request Upper Address */
+ out_be64(p->regs + PHB_M64_UPPER_BITS, 0);
+
+ /* Init_17: PCIE-E PHB2 Configuration
+ *
+ * We enable IO, M32, 32-bit MSI and 64-bit MSI
+ */
+ out_be64(p->regs + PHB_PHB2_CONFIG,
+ PHB_PHB2C_32BIT_MSI_EN |
+ PHB_PHB2C_IO_EN |
+ PHB_PHB2C_64BIT_MSI_EN |
+ PHB_PHB2C_M32_EN |
+ PHB_PHB2C_64B_TCE_EN);
+
+ /* Init_18..xx: Reset all IODA tables */
+ p7ioc_ioda_reset(&p->phb, false);
+
+ /* Init_42..47: Clear UTL & DLP error log regs */
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG1, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG2, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG3, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG4, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG1, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG2, 0xffffffffffffffff);
+
+ /* Init_48: Wait for DLP core to be out of reset */
+ if (!p7ioc_phb_wait_dlp_reset(p))
+ goto failed;
+
+ /* Init_49 - Clear port status */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffffffffffffffff);
+
+ /* Init_50..81: Init root complex config space */
+ if (!p7ioc_phb_init_rc_cfg(p))
+ goto failed;
+
+ /* Init_82..94 : Init UTL */
+ p7ioc_phb_init_utl(p);
+
+ /* Init_95: PCI-E Reset, deassert reset for internal error macros */
+ out_be64(p->regs + PHB_RESET, 0xe000000000000000);
+
+ /* Init_96: PHB Control register. Various PHB settings:
+ *
+ * - Enable ECC for various internal RAMs
+ * - Enable all TCAM entries
+ * - Set failed DMA read requests to return Completer Abort on error
+ */
+ out_be64(p->regs + PHB_CONTROL, 0x7f38000000000000);
+
+ /* Init_97: Legacy Control register
+ *
+ * The spec sets bit 0 to enable DKill to flush the TCEs. We do not
+ * use that mechanism however, we require the OS to directly access
+ * the TCE Kill register, so we leave that bit set to 0
+ */
+ out_be64(p->regs + PHB_LEGACY_CTRL, 0x0000000000000000);
+
+ /* Init_98..138 : Setup error registers */
+ p7ioc_phb_init_errors(p);
+
+ /* Init_139: Read error summary */
+ val = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
+ if (val) {
+ PHBERR(p, "Errors detected during PHB init: 0x%16llx\n", val);
+ goto failed;
+ }
+
+ /* Steps Init_140..142 have been removed from the spec. */
+
+ /* Init_143..144: Enable IO, MMIO, Bus master etc... and clear
+ * status bits
+ */
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_STAT,
+ PCI_CFG_STAT_SENT_TABORT |
+ PCI_CFG_STAT_RECV_TABORT |
+ PCI_CFG_STAT_RECV_MABORT |
+ PCI_CFG_STAT_SENT_SERR |
+ PCI_CFG_STAT_RECV_PERR);
+ p7ioc_pcicfg_write16(&p->phb, 0, PCI_CFG_CMD,
+ PCI_CFG_CMD_SERR_EN |
+ PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_BUS_MASTER_EN |
+ PCI_CFG_CMD_MEM_EN |
+ PCI_CFG_CMD_IO_EN);
+
+	/* At this point, the spec suggests doing a bus walk. However, we
+	 * haven't powered up the slots with the SHPC controller. We'll
+ * deal with that and link training issues later, for now, let's
+ * enable the full range of error detection
+ */
+
+ /* Init_145..149: Enable error interrupts and LEM */
+ out_be64(p->regs + PHB_ERR_IRQ_ENABLE, 0x0000000061c00000);
+ out_be64(p->regs + PHB_OUT_ERR_IRQ_ENABLE, 0x0000430803000000);
+ out_be64(p->regs + PHB_INA_ERR_IRQ_ENABLE, 0xc00003ff01006000);
+ out_be64(p->regs + PHB_INB_ERR_IRQ_ENABLE, 0x0000000000000000);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x1249a1147f500f2c);
+
+ /* Init_150: Enable DMA read/write TLP address speculation */
+ out_be64(p->regs + PHB_TCE_PREFETCH, 0x0000c00000000000);
+
+ /* Init_151..152: Set various timeouts */
+ out_be64(p->regs + PHB_TIMEOUT_CTRL1, 0x1611112010200000);
+ out_be64(p->regs + PHB_TIMEOUT_CTRL2, 0x0000561300000000);
+
+ /* Mark the PHB as functional which enables all the various sequences */
+ p->state = P7IOC_PHB_STATE_FUNCTIONAL;
+
+ return OPAL_SUCCESS;
+
+ failed:
+ PHBERR(p, "Initialization failed\n");
+ p->state = P7IOC_PHB_STATE_BROKEN;
+
+ return OPAL_HARDWARE;
+}
+
+void p7ioc_phb_reset(struct phb *phb)
+{
+ struct p7ioc_phb *p = phb_to_p7ioc_phb(phb);
+ struct p7ioc *ioc = p->ioc;
+ uint64_t ci_idx, rreg;
+ unsigned int i;
+ bool fenced;
+
+ /* Check our fence status. The fence bits we care about are
+ * two bits per PHB at IBM bit location 14 and 15 + 4*phb
+ */
+ fenced = p7ioc_phb_fenced(p);
+
+ PHBDBG(p, "PHB reset... (fenced: %d)\n", (int)fenced);
+
+ /*
+	 * If not fenced and already functional, let's do an IODA reset
+	 * to clear pending DMAs and wait a bit for things to settle. Note
+	 * that the IODA table cache won't be emptied, so we can restore
+	 * the tables during error recovery.
+ */
+ if (p->state == P7IOC_PHB_STATE_FUNCTIONAL && !fenced) {
+ PHBDBG(p, " ioda reset ...\n");
+ p7ioc_ioda_reset(&p->phb, false);
+ time_wait_ms(100);
+ }
+
+ /* CI port index */
+ ci_idx = p->index + 2;
+
+ /* Reset register bits for this PHB */
+ rreg = 0;/*PPC_BIT(8 + ci_idx * 2);*/ /* CI port config reset */
+ rreg |= PPC_BIT(9 + ci_idx * 2); /* CI port func reset */
+ rreg |= PPC_BIT(32 + p->index); /* PHBn config reset */
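+	/* For PHB index 0, for instance, ci_idx is 2, so this sets IBM
+	 * bits 13 (CI port func reset) and 32 (PHB0 config reset); the
+	 * CI port config reset (bit 12) stays commented out above.
+	 */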
+
+ /* Mask various errors during reset and clear pending errors */
+ out_be64(ioc->regs + P7IOC_CIn_LEM_ERR_MASK(ci_idx),
+ 0xa4f4000000000000ul);
+ out_be64(p->regs_asb + PHB_LEM_ERROR_MASK, 0xadb650c9808dd051ul);
+ out_be64(ioc->regs + P7IOC_CIn_LEM_FIR(ci_idx), 0);
+
+ /* We need to retry in case the fence doesn't lift due to a
+ * problem with lost credits (HW guys). How many times ?
+ */
+#define MAX_PHB_RESET_RETRIES 5
+ for (i = 0; i < MAX_PHB_RESET_RETRIES; i++) {
+ PHBDBG(p, " reset try %d...\n", i);
+ /* Apply reset */
+ out_be64(ioc->regs + P7IOC_CCRR, rreg);
+ time_wait_ms(1);
+ out_be64(ioc->regs + P7IOC_CCRR, 0);
+
+		/* Check if fence lifted */
+ fenced = p7ioc_phb_fenced(p);
+ PHBDBG(p, " fenced: %d...\n", (int)fenced);
+ if (!fenced)
+ break;
+ }
+
+ /* Reset failed, not much to do, maybe add an error return */
+ if (fenced) {
+ PHBERR(p, "Reset failed, fence still set !\n");
+ p->state = P7IOC_PHB_STATE_BROKEN;
+ return;
+ }
+
+ /* Wait a bit */
+ time_wait_ms(100);
+
+ /* Re-initialize the PHB */
+ p7ioc_phb_init(p);
+
+ /* Restore the CI error mask */
+ out_be64(ioc->regs + P7IOC_CIn_LEM_ERR_MASK_AND(ci_idx), 0);
+}
+
diff --git a/hw/p7ioc.c b/hw/p7ioc.c
new file mode 100644
index 0000000..9aa6480
--- /dev/null
+++ b/hw/p7ioc.c
@@ -0,0 +1,677 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <p7ioc.h>
+#include <p7ioc-regs.h>
+#include <cec.h>
+#include <opal.h>
+#include <io.h>
+#include <vpd.h>
+#include <interrupts.h>
+#include <ccan/str/str.h>
+
+/*
+ * Determine the base address of LEM registers according to
+ * the indicated error source.
+ */
+static void *p7ioc_LEM_base(struct p7ioc *ioc, uint32_t err_src)
+{
+ uint32_t index;
+ void *base = NULL;
+
+ switch (err_src) {
+ case P7IOC_ERR_SRC_RGC:
+ base = ioc->regs + P7IOC_RGC_LEM_BASE;
+ break;
+ case P7IOC_ERR_SRC_BI_UP:
+ base = ioc->regs + P7IOC_BI_UP_LEM_BASE;
+ break;
+ case P7IOC_ERR_SRC_BI_DOWN:
+ base = ioc->regs + P7IOC_BI_DOWN_LEM_BASE;
+ break;
+ case P7IOC_ERR_SRC_CI_P0:
+ case P7IOC_ERR_SRC_CI_P1:
+ case P7IOC_ERR_SRC_CI_P2:
+ case P7IOC_ERR_SRC_CI_P3:
+ case P7IOC_ERR_SRC_CI_P4:
+ case P7IOC_ERR_SRC_CI_P5:
+ case P7IOC_ERR_SRC_CI_P6:
+ case P7IOC_ERR_SRC_CI_P7:
+ index = err_src - P7IOC_ERR_SRC_CI_P0;
+ base = ioc->regs + P7IOC_CI_PORTn_LEM_BASE(index);
+ break;
+ case P7IOC_ERR_SRC_PHB0:
+ case P7IOC_ERR_SRC_PHB1:
+ case P7IOC_ERR_SRC_PHB2:
+ case P7IOC_ERR_SRC_PHB3:
+ case P7IOC_ERR_SRC_PHB4:
+ case P7IOC_ERR_SRC_PHB5:
+ index = err_src - P7IOC_ERR_SRC_PHB0;
+ base = ioc->regs + P7IOC_PHBn_LEM_BASE(index);
+ break;
+ case P7IOC_ERR_SRC_MISC:
+ base = ioc->regs + P7IOC_MISC_LEM_BASE;
+ break;
+ case P7IOC_ERR_SRC_I2C:
+ base = ioc->regs + P7IOC_I2C_LEM_BASE;
+ break;
+ default:
+ prerror("%s: Unknown error source %d\n",
+ __func__, err_src);
+ }
+
+ return base;
+}
+
+static void p7ioc_get_diag_common(struct p7ioc *ioc,
+ void *base,
+ struct OpalIoP7IOCErrorData *data)
+{
+ /* GEM */
+ data->gemXfir = in_be64(ioc->regs + P7IOC_GEM_XFIR);
+ data->gemRfir = in_be64(ioc->regs + P7IOC_GEM_RFIR);
+ data->gemRirqfir = in_be64(ioc->regs + P7IOC_GEM_RIRQFIR);
+ data->gemMask = in_be64(ioc->regs + P7IOC_GEM_MASK);
+ data->gemRwof = in_be64(ioc->regs + P7IOC_GEM_RWOF);
+
+ /* LEM */
+ data->lemFir = in_be64(base + P7IOC_LEM_FIR_OFFSET);
+ data->lemErrMask = in_be64(base + P7IOC_LEM_ERR_MASK_OFFSET);
+ data->lemAction0 = in_be64(base + P7IOC_LEM_ACTION_0_OFFSET);
+ data->lemAction1 = in_be64(base + P7IOC_LEM_ACTION_1_OFFSET);
+ data->lemWof = in_be64(base + P7IOC_LEM_WOF_OFFSET);
+}
+
+static int64_t p7ioc_get_diag_data(struct io_hub *hub,
+ void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct p7ioc *ioc = iohub_to_p7ioc(hub);
+ struct OpalIoP7IOCErrorData *data = diag_buffer;
+ void *base;
+
+ /* Make sure we have enough buffer */
+ if (diag_buffer_len < sizeof(struct OpalIoP7IOCErrorData))
+ return OPAL_PARAMETER;
+
+	/* We needn't do anything if there are no pending errors */
+ if (!p7ioc_err_pending(ioc))
+ return OPAL_CLOSED;
+
+ /*
+ * We needn't collect diag-data for CI Port{2, ..., 7}
+ * and PHB{0, ..., 5} since their errors (except GXE)
+ * have been cached to the specific PHB.
+ */
+ base = p7ioc_LEM_base(ioc, ioc->err.err_src);
+ if (!base) {
+ p7ioc_set_err_pending(ioc, false);
+ return OPAL_INTERNAL_ERROR;
+ }
+
+ switch (ioc->err.err_src) {
+ case P7IOC_ERR_SRC_RGC:
+ data->type = OPAL_P7IOC_DIAG_TYPE_RGC;
+ p7ioc_get_diag_common(ioc, base, data);
+
+ data->rgc.rgcStatus = in_be64(ioc->regs + 0x3E1C10);
+ data->rgc.rgcLdcp = in_be64(ioc->regs + 0x3E1C18);
+
+ break;
+ case P7IOC_ERR_SRC_BI_UP:
+ data->type = OPAL_P7IOC_DIAG_TYPE_BI;
+ data->bi.biDownbound = 0;
+ p7ioc_get_diag_common(ioc, base, data);
+
+ data->bi.biLdcp0 = in_be64(ioc->regs + 0x3C0100);
+ data->bi.biLdcp1 = in_be64(ioc->regs + 0x3C0108);
+ data->bi.biLdcp2 = in_be64(ioc->regs + 0x3C0110);
+ data->bi.biFenceStatus = in_be64(ioc->regs + 0x3C0130);
+
+ break;
+ case P7IOC_ERR_SRC_BI_DOWN:
+ data->type = OPAL_P7IOC_DIAG_TYPE_BI;
+ data->bi.biDownbound = 1;
+ p7ioc_get_diag_common(ioc, base, data);
+
+ data->bi.biLdcp0 = in_be64(ioc->regs + 0x3C0118);
+ data->bi.biLdcp1 = in_be64(ioc->regs + 0x3C0120);
+ data->bi.biLdcp2 = in_be64(ioc->regs + 0x3C0128);
+ data->bi.biFenceStatus = in_be64(ioc->regs + 0x3C0130);
+
+ break;
+ case P7IOC_ERR_SRC_CI_P0:
+ case P7IOC_ERR_SRC_CI_P1:
+ data->type = OPAL_P7IOC_DIAG_TYPE_CI;
+ data->ci.ciPort = ioc->err.err_src - P7IOC_ERR_SRC_CI_P0;
+ p7ioc_get_diag_common(ioc, base, data);
+
+ data->ci.ciPortStatus = in_be64(base + 0x008);
+ data->ci.ciPortLdcp = in_be64(base + 0x010);
+ break;
+ case P7IOC_ERR_SRC_MISC:
+ data->type = OPAL_P7IOC_DIAG_TYPE_MISC;
+ p7ioc_get_diag_common(ioc, base, data);
+ break;
+ case P7IOC_ERR_SRC_I2C:
+ data->type = OPAL_P7IOC_DIAG_TYPE_I2C;
+ p7ioc_get_diag_common(ioc, base, data);
+ break;
+ default:
+ p7ioc_set_err_pending(ioc, false);
+ return OPAL_CLOSED;
+ }
+
+	/* For errors of the MAL class, we need to mask them */
+ if (ioc->err.err_class == P7IOC_ERR_CLASS_MAL)
+ out_be64(base + P7IOC_LEM_ERR_MASK_OR_OFFSET,
+ PPC_BIT(63 - ioc->err.err_bit));
+ p7ioc_set_err_pending(ioc, false);
+
+ return OPAL_SUCCESS;
+}
+
+static const struct io_hub_ops p7ioc_hub_ops = {
+ .set_tce_mem = NULL, /* No set_tce_mem for p7ioc, we use FMTC */
+ .get_diag_data = p7ioc_get_diag_data,
+ .reset = p7ioc_reset,
+};
+
+static int64_t p7ioc_rgc_get_xive(void *data, uint32_t isn,
+ uint16_t *server, uint8_t *prio)
+{
+ struct p7ioc *ioc = data;
+ uint32_t irq = (isn & 0xf);
+ uint32_t fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive;
+
+ if (fbuid != ioc->rgc_buid)
+ return OPAL_PARAMETER;
+
+ xive = ioc->xive_cache[irq];
+ *server = GETFIELD(IODA_XIVT_SERVER, xive);
+ *prio = GETFIELD(IODA_XIVT_PRIORITY, xive);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t p7ioc_rgc_set_xive(void *data, uint32_t isn,
+ uint16_t server, uint8_t prio)
+{
+ struct p7ioc *ioc = data;
+ uint32_t irq = (isn & 0xf);
+ uint32_t fbuid = P7_IRQ_FBUID(isn);
+ uint64_t xive;
+ uint64_t m_server, m_prio;
+
+ if (fbuid != ioc->rgc_buid)
+ return OPAL_PARAMETER;
+
+ xive = SETFIELD(IODA_XIVT_SERVER, 0ull, server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, prio);
+ ioc->xive_cache[irq] = xive;
+
+ /* Now we mangle the server and priority */
+ if (prio == 0xff) {
+ m_server = 0;
+ m_prio = 0xff;
+ } else {
+ m_server = server >> 3;
+ m_prio = (prio >> 3) | ((server & 7) << 5);
+ }
+
+	/* Update the XIVE. We don't care about the HRT entry on P7IOC */
+ out_be64(ioc->regs + 0x3e1820, (0x0002000000000000 | irq));
+ xive = in_be64(ioc->regs + 0x3e1830);
+ xive = SETFIELD(IODA_XIVT_SERVER, xive, m_server);
+ xive = SETFIELD(IODA_XIVT_PRIORITY, xive, m_prio);
+ out_be64(ioc->regs + 0x3e1830, xive);
+
+ return OPAL_SUCCESS;
+}
+
+/*
+ * The function is used to figure out the error class and error
+ * bit according to LEM WOF.
+ *
+ * The bits of the WOF register have been classified according to
+ * error severity, and we should process the more severe errors
+ * first. For example, if 2 errors (GXE, INF) are pending, we
+ * should process the GXE, since the INF is meaningless in the
+ * face of the GXE.
+ */
+static bool p7ioc_err_bit(struct p7ioc *ioc, uint64_t wof)
+{
+ uint64_t val, severity[P7IOC_ERR_CLASS_LAST];
+ int32_t class, bit, err_bit = -1;
+
+ /* Clear severity array */
+ memset(severity, 0, sizeof(uint64_t) * P7IOC_ERR_CLASS_LAST);
+
+ /*
+ * The severity array has fixed values. However, it depends
+ * on the damage settings for individual components. We're
+ * using fixed values based on the assumption that damage settings
+ * are fixed for now. If we change it some day, we also need
+ * change the severity array accordingly. Anyway, it's something
+ * to improve in future so that we can figure out the severity
+ * array from hardware registers.
+ */
+ switch (ioc->err.err_src) {
+ case P7IOC_ERR_SRC_EI:
+ /* EI won't create interrupt yet */
+ break;
+ case P7IOC_ERR_SRC_RGC:
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF00086E0F4FCFFFF;
+ severity[P7IOC_ERR_CLASS_RGA] = 0x0000010000000000;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0FFF781F0B030000;
+ break;
+ case P7IOC_ERR_SRC_BI_UP:
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF7FFFFFF7FFFFFFF;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0800000080000000;
+ break;
+ case P7IOC_ERR_SRC_BI_DOWN:
+ severity[P7IOC_ERR_CLASS_GXE] = 0xDFFFF7F35F8000BF;
+ severity[P7IOC_ERR_CLASS_INF] = 0x2000080CA07FFF40;
+ break;
+ case P7IOC_ERR_SRC_CI_P0:
+ severity[P7IOC_ERR_CLASS_GXE] = 0xF5FF000000000000;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0200FFFFFFFFFFFF;
+ severity[P7IOC_ERR_CLASS_MAL] = 0x0800000000000000;
+ break;
+ case P7IOC_ERR_SRC_CI_P1:
+ severity[P7IOC_ERR_CLASS_GXE] = 0xFFFF000000000000;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFF;
+ break;
+ case P7IOC_ERR_SRC_CI_P2:
+ case P7IOC_ERR_SRC_CI_P3:
+ case P7IOC_ERR_SRC_CI_P4:
+ case P7IOC_ERR_SRC_CI_P5:
+ case P7IOC_ERR_SRC_CI_P6:
+ case P7IOC_ERR_SRC_CI_P7:
+ severity[P7IOC_ERR_CLASS_GXE] = 0x5B0B000000000000;
+ severity[P7IOC_ERR_CLASS_PHB] = 0xA4F4000000000000;
+ severity[P7IOC_ERR_CLASS_INF] = 0x0000FFFFFFFFFFFF;
+ break;
+ case P7IOC_ERR_SRC_MISC:
+ severity[P7IOC_ERR_CLASS_GXE] = 0x0000000310000000;
+ severity[P7IOC_ERR_CLASS_PLL] = 0x0000000001C00000;
+ severity[P7IOC_ERR_CLASS_INF] = 0x555FFFF0EE3FFFFF;
+ severity[P7IOC_ERR_CLASS_MAL] = 0xAAA0000C00000000;
+ break;
+ case P7IOC_ERR_SRC_I2C:
+ severity[P7IOC_ERR_CLASS_GXE] = 0x1100000000000000;
+ severity[P7IOC_ERR_CLASS_INF] = 0xEEFFFFFFFFFFFFFF;
+ break;
+ case P7IOC_ERR_SRC_PHB0:
+ case P7IOC_ERR_SRC_PHB1:
+ case P7IOC_ERR_SRC_PHB2:
+ case P7IOC_ERR_SRC_PHB3:
+ case P7IOC_ERR_SRC_PHB4:
+ case P7IOC_ERR_SRC_PHB5:
+ severity[P7IOC_ERR_CLASS_PHB] = 0xADB650CB808DD051;
+ severity[P7IOC_ERR_CLASS_ER] = 0x0000A0147F50092C;
+ severity[P7IOC_ERR_CLASS_INF] = 0x52490F2000222682;
+ break;
+ }
+
+ /*
+ * The error class (ERR_CLASS) has been defined based on
+ * their severity. The priority of those errors out of same
+ * class should be defined based on the position of corresponding
+ * bit in LEM (Local Error Macro) register.
+ */
+ for (class = P7IOC_ERR_CLASS_NONE + 1;
+ err_bit < 0 && class < P7IOC_ERR_CLASS_LAST;
+ class++) {
+ val = wof & severity[class];
+ if (!val) continue;
+
+ for (bit = 0; bit < 64; bit++) {
+ if (val & PPC_BIT(bit)) {
+ err_bit = 63 - bit;
+ break;
+ }
+ }
+ }
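+	/* The scan stops at the first class with a matching WOF bit;
+	 * since the classes appear to be ordered from most to least
+	 * severe (GXE before INF, per the comment above), a pending GXE
+	 * shadows a simultaneous INF. Within a class the most
+	 * significant set bit wins.
+	 */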
+
+ /* If we don't find the error bit, we needn't go on. */
+ if (err_bit < 0)
+ return false;
+
+ ioc->err.err_class = class - 1;
+ ioc->err.err_bit = err_bit;
+ return true;
+}
+
+/*
+ * Check LEM to determine the detailed error information.
+ * The function is expected to be called while the OS calls
+ * the OPAL API opal_pci_next_error(). Errors from
+ * CI Port{2, ..., 7} or PHB{0, ..., 5} are cached to the
+ * specific PHB; the remaining errors are cached to the IOC.
+ */
+bool p7ioc_check_LEM(struct p7ioc *ioc,
+ uint16_t *pci_error_type,
+ uint16_t *severity)
+{
+ void *base;
+ uint64_t fir, wof, mask;
+ struct p7ioc_phb *p;
+ int32_t index;
+ bool ret;
+
+ /* Make sure we have error pending on IOC */
+ if (!p7ioc_err_pending(ioc))
+ return false;
+
+ /*
+	 * The IOC has probably been put into the fatal error
+	 * state (GXE) because of a failure reading the
+	 * GEM FIR.
+ */
+ if (ioc->err.err_src == P7IOC_ERR_SRC_NONE &&
+ ioc->err.err_class != P7IOC_ERR_CLASS_NONE)
+ goto err;
+
+ /*
+ * Get the base address of LEM registers according
+ * to the error source. If we failed to get that,
+ * the error pending flag would be cleared.
+ */
+ base = p7ioc_LEM_base(ioc, ioc->err.err_src);
+ if (!base) {
+ p7ioc_set_err_pending(ioc, false);
+ return false;
+ }
+
+ /* IOC would be broken upon broken FIR */
+ fir = in_be64(base + P7IOC_LEM_FIR_OFFSET);
+ if (fir == 0xffffffffffffffff) {
+ ioc->err.err_src = P7IOC_ERR_SRC_NONE;
+ ioc->err.err_class = P7IOC_ERR_CLASS_GXE;
+ goto err;
+ }
+
+	/* Read ERR_MASK and WOF. However, we needn't do so for PHBn */
+ wof = in_be64(base + P7IOC_LEM_WOF_OFFSET);
+ if (ioc->err.err_src >= P7IOC_ERR_SRC_PHB0 &&
+ ioc->err.err_src <= P7IOC_ERR_SRC_PHB5) {
+ mask = 0x0ull;
+ } else {
+ mask = in_be64(base + P7IOC_LEM_ERR_MASK_OFFSET);
+ in_be64(base + P7IOC_LEM_ACTION_0_OFFSET);
+ in_be64(base + P7IOC_LEM_ACTION_1_OFFSET);
+ }
+
+ /*
+	 * We need to process unmasked errors first. If we
+	 * fail to find the error bit, we needn't proceed.
+ */
+ if (wof & ~mask)
+ wof &= ~mask;
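+	/* Prefer unmasked errors when any are present; if only masked
+	 * bits are set in WOF, fall through and classify those instead.
+	 */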
+ if (!wof) {
+ p7ioc_set_err_pending(ioc, false);
+ return false;
+ }
+
+ if (!p7ioc_err_bit(ioc, wof)) {
+ p7ioc_set_err_pending(ioc, false);
+ return false;
+ }
+
+err:
+ /*
+	 * We get here because of a valid error. Errors from
+	 * CI Port{2, ..., 7} and PHB{0, ..., 5} will be cached to
+	 * the specific PHB. However, global errors (e.g. GXE) are
+	 * cached to the IOC directly, as are the remaining errors.
+ */
+ if (((ioc->err.err_src >= P7IOC_ERR_SRC_CI_P2 &&
+ ioc->err.err_src <= P7IOC_ERR_SRC_CI_P7) ||
+ (ioc->err.err_src >= P7IOC_ERR_SRC_PHB0 &&
+ ioc->err.err_src <= P7IOC_ERR_SRC_PHB5)) &&
+ ioc->err.err_class != P7IOC_ERR_CLASS_GXE) {
+ index = (ioc->err.err_src >= P7IOC_ERR_SRC_PHB0 &&
+ ioc->err.err_src <= P7IOC_ERR_SRC_PHB5) ?
+ (ioc->err.err_src - P7IOC_ERR_SRC_PHB0) :
+ (ioc->err.err_src - P7IOC_ERR_SRC_CI_P2);
+ p = &ioc->phbs[index];
+
+ if (p7ioc_phb_enabled(ioc, index)) {
+ p->err.err_src = ioc->err.err_src;
+ p->err.err_class = ioc->err.err_class;
+ p->err.err_bit = ioc->err.err_bit;
+ p7ioc_phb_set_err_pending(p, true);
+ p7ioc_set_err_pending(ioc, false);
+
+ return false;
+ }
+ }
+
+ /*
+	 * Map the internal error class to one the OS can recognize.
+	 * Errors from a PHB or its associated CI port would be
+	 * GXE, PHB-fatal, ER, or INF. In that case, GXE is cached
+	 * to the IOC and the remaining classes are cached to the
+	 * specific PHB.
+ */
+ switch (ioc->err.err_class) {
+ case P7IOC_ERR_CLASS_GXE:
+ case P7IOC_ERR_CLASS_PLL:
+ case P7IOC_ERR_CLASS_RGA:
+ *pci_error_type = OPAL_EEH_IOC_ERROR;
+ *severity = OPAL_EEH_SEV_IOC_DEAD;
+ ret = true;
+ break;
+ case P7IOC_ERR_CLASS_INF:
+ case P7IOC_ERR_CLASS_MAL:
+ *pci_error_type = OPAL_EEH_IOC_ERROR;
+ *severity = OPAL_EEH_SEV_INF;
+ ret = false;
+ break;
+ default:
+ p7ioc_set_err_pending(ioc, false);
+ ret = false;
+ }
+
+ return ret;
+}
+
+/*
+ * Check GEM to see if there are any problematic components.
+ * The function is expected to be called from the RGC interrupt
+ * handler. Also note that a failure reading XFIR will cause
+ * a GXE directly.
+ */
+static bool p7ioc_check_GEM(struct p7ioc *ioc)
+{
+ uint64_t xfir, rwof;
+
+ /*
+ * Recov_5: Read GEM Xfir
+ * Recov_6: go to GXE recovery?
+ */
+ xfir = in_be64(ioc->regs + P7IOC_GEM_XFIR);
+ if (xfir == 0xffffffffffffffff) {
+ ioc->err.err_src = P7IOC_ERR_SRC_NONE;
+ ioc->err.err_class = P7IOC_ERR_CLASS_GXE;
+ p7ioc_set_err_pending(ioc, true);
+ return true;
+ }
+
+ /*
+ * Recov_7: Read GEM Rfir
+ * Recov_8: Read GEM RIRQfir
+ * Recov_9: Read GEM RWOF
+ * Recov_10: Read Fence Shadow
+ * Recov_11: Read Fence Shadow WOF
+ */
+ in_be64(ioc->regs + P7IOC_GEM_RFIR);
+ in_be64(ioc->regs + P7IOC_GEM_RIRQFIR);
+ rwof = in_be64(ioc->regs + P7IOC_GEM_RWOF);
+ in_be64(ioc->regs + P7IOC_CHIP_FENCE_SHADOW);
+ in_be64(ioc->regs + P7IOC_CHIP_FENCE_WOF);
+
+ /*
+ * Check GEM RWOF to see which component has been
+	 * put into a problematic state.
+ */
+ ioc->err.err_src = P7IOC_ERR_SRC_NONE;
+ if (rwof & PPC_BIT(1)) ioc->err.err_src = P7IOC_ERR_SRC_RGC;
+ else if (rwof & PPC_BIT(2)) ioc->err.err_src = P7IOC_ERR_SRC_BI_UP;
+ else if (rwof & PPC_BIT(3)) ioc->err.err_src = P7IOC_ERR_SRC_BI_DOWN;
+ else if (rwof & PPC_BIT(4)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P0;
+ else if (rwof & PPC_BIT(5)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P1;
+ else if (rwof & PPC_BIT(6)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P2;
+ else if (rwof & PPC_BIT(7)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P3;
+ else if (rwof & PPC_BIT(8)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P4;
+ else if (rwof & PPC_BIT(9)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P5;
+ else if (rwof & PPC_BIT(10)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P6;
+ else if (rwof & PPC_BIT(11)) ioc->err.err_src = P7IOC_ERR_SRC_CI_P7;
+ else if (rwof & PPC_BIT(16)) ioc->err.err_src = P7IOC_ERR_SRC_PHB0;
+ else if (rwof & PPC_BIT(17)) ioc->err.err_src = P7IOC_ERR_SRC_PHB1;
+ else if (rwof & PPC_BIT(18)) ioc->err.err_src = P7IOC_ERR_SRC_PHB2;
+ else if (rwof & PPC_BIT(19)) ioc->err.err_src = P7IOC_ERR_SRC_PHB3;
+ else if (rwof & PPC_BIT(20)) ioc->err.err_src = P7IOC_ERR_SRC_PHB4;
+ else if (rwof & PPC_BIT(21)) ioc->err.err_src = P7IOC_ERR_SRC_PHB5;
+ else if (rwof & PPC_BIT(24)) ioc->err.err_src = P7IOC_ERR_SRC_MISC;
+ else if (rwof & PPC_BIT(25)) ioc->err.err_src = P7IOC_ERR_SRC_I2C;
+
+ /*
+ * If we detect any problematic components, the OS is
+	 * expected to poll for more details through the OPAL
+	 * interface.
+ */
+ if (ioc->err.err_src != P7IOC_ERR_SRC_NONE) {
+ p7ioc_set_err_pending(ioc, true);
+ return true;
+ }
+
+ return false;
+}
+
+static void p7ioc_rgc_interrupt(void *data, uint32_t isn)
+{
+ struct p7ioc *ioc = data;
+
+ printf("Got RGC interrupt 0x%04x\n", isn);
+
+	/* We will notify the OS when we get an error from GEM */
+ if (p7ioc_check_GEM(ioc))
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR,
+ OPAL_EVENT_PCI_ERROR);
+}
+
+static const struct irq_source_ops p7ioc_rgc_irq_ops = {
+ .get_xive = p7ioc_rgc_get_xive,
+ .set_xive = p7ioc_rgc_set_xive,
+ .interrupt = p7ioc_rgc_interrupt,
+};
+
+static void p7ioc_create_hub(struct dt_node *np)
+{
+ struct p7ioc *ioc;
+ unsigned int i, id;
+ u64 bar1, bar2;
+ u32 pdt;
+ char *path;
+
+ /* Use the BUID extension as ID and add it to device-tree */
+ id = dt_prop_get_u32(np, "ibm,buid-ext");
+ path = dt_get_path(np);
+ printf("P7IOC: Found at %s ID 0x%x\n", path, id);
+ free(path);
+
+ /* Load VPD LID */
+ vpd_iohub_load(np);
+
+ ioc = zalloc(sizeof(struct p7ioc));
+ if (!ioc)
+ return;
+ ioc->hub.hub_id = id;
+ ioc->hub.ops = &p7ioc_hub_ops;
+ ioc->dt_node = np;
+
+ bar1 = dt_prop_get_u64(np, "ibm,gx-bar-1");
+ bar2 = dt_prop_get_u64(np, "ibm,gx-bar-2");
+
+ ioc->regs = (void *)bar1;
+
+ ioc->mmio1_win_start = bar1;
+ ioc->mmio1_win_size = MWIN1_SIZE;
+ ioc->mmio2_win_start = bar2;
+ ioc->mmio2_win_size = MWIN2_SIZE;
+
+ ioc->buid_base = id << 9;
+ ioc->rgc_buid = ioc->buid_base + RGC_BUID_OFFSET;
+
+ /* Add some DT properties */
+ dt_add_property_cells(np, "ibm,opal-hubid", 0, id);
+
+ /* XXX Fixme: how many RGC interrupts ? */
+ dt_add_property_cells(np, "interrupts", ioc->rgc_buid << 4);
+ dt_add_property_cells(np, "interrupt-base", ioc->rgc_buid << 4);
+
+ /* XXX What about ibm,opal-mmio-real ? */
+
+ /* Clear the RGC XIVE cache */
+ for (i = 0; i < 16; i++)
+ ioc->xive_cache[i] = SETFIELD(IODA_XIVT_PRIORITY, 0ull, 0xff);
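+	/* Priority 0xff marks an interrupt as masked, matching the
+	 * special case in the server/priority mangling above.
+	 */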
+
+ /*
+ * Register RGC interrupts
+ *
+ * For now I assume only 0 is... to verify with Greg or HW guys,
+ * we support all 16
+ */
+ register_irq_source(&p7ioc_rgc_irq_ops, ioc, ioc->rgc_buid << 4, 1);
+
+ /* Check for presence detect from HDAT, we use only BR1 on P7IOC */
+ pdt = dt_prop_get_u32_def(np, "ibm,br1-presence-detect", 0xffffffff);
+ if (pdt != 0xffffffff)
+ printf("P7IOC: Presence detect from HDAT : 0x%02x\n", pdt);
+ ioc->phb_pdt = pdt & 0xff;
+
+ /* Setup PHB structures (no HW access yet) */
+ for (i = 0; i < P7IOC_NUM_PHBS; i++) {
+ if (p7ioc_phb_enabled(ioc, i))
+ p7ioc_phb_setup(ioc, i);
+ else
+ ioc->phbs[i].state = P7IOC_PHB_STATE_OFF;
+ }
+
+ /* Now, we do the bulk of the inits */
+ p7ioc_inits(ioc);
+
+ printf("P7IOC: Initialization complete\n");
+
+ cec_register(&ioc->hub);
+}
+
+void probe_p7ioc(void)
+{
+ struct dt_node *np;
+
+ dt_for_each_compatible(dt_root, np, "ibm,p7ioc")
+ p7ioc_create_hub(np);
+}
+
diff --git a/hw/phb3.c b/hw/phb3.c
new file mode 100644
index 0000000..5e51e71
--- /dev/null
+++ b/hw/phb3.c
@@ -0,0 +1,3880 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * PHB3 support
+ *
+ */
+
+/*
+ *
+ * FIXME:
+ * More stuff for EEH support:
+ * - PBCQ error reporting interrupt
+ * - I2C-based power management (replacing SHPC)
+ * - Directly detect fenced PHB through one dedicated HW reg
+ */
+
+#include <skiboot.h>
+#include <io.h>
+#include <timebase.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <vpd.h>
+#include <interrupts.h>
+#include <opal.h>
+#include <cpu.h>
+#include <device.h>
+#include <ccan/str/str.h>
+#include <ccan/array_size/array_size.h>
+#include <xscom.h>
+#include <affinity.h>
+#include <phb3.h>
+#include <phb3-regs.h>
+#include <capp.h>
+#include <fsp.h>
+
+/* Enable this to disable error interrupts for debug purposes */
+#undef DISABLE_ERR_INTS
+
+static void phb3_init_hw(struct phb3 *p);
+
+static void phb3_trace(struct phb3 *p, FILE *s, const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+
+static void phb3_trace(struct phb3 *p, FILE *s, const char *fmt, ...)
+{
+ /* Use a temp stack buffer to print all at once to avoid
+ * mixups of a trace entry on SMP
+ */
+ char tbuf[128 + 10];
+ va_list args;
+ char *b = tbuf;
+
+ b += sprintf(b, "PHB%d: ", p->phb.opal_id);
+ va_start(args, fmt);
+ vsnprintf(b, 128, fmt, args);
+ va_end(args);
+ fputs(tbuf, s);
+}
+#define PHBDBG(p, fmt...) phb3_trace(p, stdout, fmt)
+#define PHBINF(p, fmt...) phb3_trace(p, stderr, fmt)
+#define PHBERR(p, fmt...) phb3_trace(p, stderr, fmt)
+
+/*
+ * Lock callbacks. Allows the OPAL API handlers to lock the
+ * PHB around calls such as config space, EEH, etc...
+ */
+static void phb3_lock(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ lock(&p->lock);
+}
+
+static void phb3_unlock(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ unlock(&p->lock);
+}
+
+/* Helper to select an IODA table entry */
+static inline void phb3_ioda_sel(struct phb3 *p, uint32_t table,
+ uint32_t addr, bool autoinc)
+{
+ out_be64(p->regs + PHB_IODA_ADDR,
+ (autoinc ? PHB_IODA_AD_AUTOINC : 0) |
+ SETFIELD(PHB_IODA_AD_TSEL, 0ul, table) |
+ SETFIELD(PHB_IODA_AD_TADR, 0ul, addr));
+}
+
+/* Helper to set the state machine timeout */
+static inline uint64_t phb3_set_sm_timeout(struct phb3 *p, uint64_t dur)
+{
+ uint64_t target, now = mftb();
+
+ target = now + dur;
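+	/* A stored target of 0 presumably means "no timeout armed", so
+	 * skip over it if the timebase sum happens to wrap to exactly 0.
+	 */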
+ if (target == 0)
+ target++;
+ p->delay_tgt_tb = target;
+
+ return dur;
+}
+
+/* Check if AIB is fenced via PBCQ NFIR */
+static bool phb3_fenced(struct phb3 *p)
+{
+ uint64_t nfir;
+
+	/* We probably still have crazy xscom */
+ xscom_read(p->chip_id, p->pe_xscom + 0x0, &nfir);
+ if (nfir & PPC_BIT(16)) {
+ p->flags |= PHB3_AIB_FENCED;
+ p->state = PHB3_STATE_FENCED;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Configuration space access
+ *
+ * The PHB lock is assumed to be already held
+ */
+static int64_t phb3_pcicfg_check(struct phb3 *p, uint32_t bdfn,
+ uint32_t offset, uint32_t size,
+ uint8_t *pe)
+{
+ uint32_t sm = size - 1;
+
+ if (offset > 0xfff || bdfn > 0xffff)
+ return OPAL_PARAMETER;
+ if (offset & sm)
+ return OPAL_PARAMETER;
+
+ /* The root bus only has a device at 0 and we get into an
+ * error state if we try to probe beyond that, so let's
+ * avoid that and just return an error to Linux
+ */
+ if ((bdfn >> 8) == 0 && (bdfn & 0xff))
+ return OPAL_HARDWARE;
+
+ /* Check PHB state */
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ /* Fetch the PE# from cache */
+ *pe = p->rte_cache[bdfn];
+
+ return OPAL_SUCCESS;
+}
+
+#define PHB3_PCI_CFG_READ(size, type) \
+static int64_t phb3_pcicfg_read##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type *data) \
+{ \
+ struct phb3 *p = phb_to_phb3(phb); \
+ uint64_t addr, val64; \
+ int64_t rc; \
+ uint8_t pe; \
+ bool use_asb = false; \
+ \
+ /* Initialize data in case of error */ \
+ *data = (type)0xffffffff; \
+ \
+ rc = phb3_pcicfg_check(p, bdfn, offset, sizeof(type), &pe); \
+ if (rc) \
+ return rc; \
+ \
+ if (p->flags & PHB3_AIB_FENCED) { \
+ if (!(p->flags & PHB3_CFG_USE_ASB)) \
+ return OPAL_HARDWARE; \
+ use_asb = true; \
+ } else if ((p->flags & PHB3_CFG_BLOCKED) && bdfn != 0) { \
+ return OPAL_HARDWARE; \
+ } \
+ \
+ addr = PHB_CA_ENABLE | ((uint64_t)bdfn << PHB_CA_FUNC_LSH); \
+ addr = SETFIELD(PHB_CA_REG, addr, offset); \
+ addr = SETFIELD(PHB_CA_PE, addr, pe); \
+ if (use_asb) { \
+ phb3_write_reg_asb(p, PHB_CONFIG_ADDRESS, addr); \
+ sync(); \
+ val64 = bswap_64(phb3_read_reg_asb(p, PHB_CONFIG_DATA)); \
+ *data = (type)(val64 >> (8 * (offset & (4 - sizeof(type))))); \
+ } else { \
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, addr); \
+ *data = in_le##size(p->regs + PHB_CONFIG_DATA + \
+ (offset & (4 - sizeof(type)))); \
+ } \
+ \
+ return OPAL_SUCCESS; \
+}
+
+#define PHB3_PCI_CFG_WRITE(size, type) \
+static int64_t phb3_pcicfg_write##size(struct phb *phb, uint32_t bdfn, \
+ uint32_t offset, type data) \
+{ \
+ struct phb3 *p = phb_to_phb3(phb); \
+ uint64_t addr, val64 = 0; \
+ int64_t rc; \
+ uint8_t pe; \
+ bool use_asb = false; \
+ \
+ rc = phb3_pcicfg_check(p, bdfn, offset, sizeof(type), &pe); \
+ if (rc) \
+ return rc; \
+ \
+ if (p->flags & PHB3_AIB_FENCED) { \
+ if (!(p->flags & PHB3_CFG_USE_ASB)) \
+ return OPAL_HARDWARE; \
+ use_asb = true; \
+ } else if ((p->flags & PHB3_CFG_BLOCKED) && bdfn != 0) { \
+ return OPAL_HARDWARE; \
+ } \
+ \
+ addr = PHB_CA_ENABLE | ((uint64_t)bdfn << PHB_CA_FUNC_LSH); \
+ addr = SETFIELD(PHB_CA_REG, addr, offset); \
+ addr = SETFIELD(PHB_CA_PE, addr, pe); \
+ if (use_asb) { \
+ val64 = data; \
+ val64 = bswap_64(val64 << 8 * (offset & (4 - sizeof(type)))); \
+ phb3_write_reg_asb(p, PHB_CONFIG_ADDRESS, addr); \
+ sync(); \
+ phb3_write_reg_asb(p, PHB_CONFIG_DATA, val64); \
+ } else { \
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, addr); \
+ out_le##size(p->regs + PHB_CONFIG_DATA + \
+ (offset & (4 - sizeof(type))), data); \
+ } \
+ \
+ return OPAL_SUCCESS; \
+}
+
+PHB3_PCI_CFG_READ(8, u8)
+PHB3_PCI_CFG_READ(16, u16)
+PHB3_PCI_CFG_READ(32, u32)
+PHB3_PCI_CFG_WRITE(8, u8)
+PHB3_PCI_CFG_WRITE(16, u16)
+PHB3_PCI_CFG_WRITE(32, u32)
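+
+/*
+ * For reference, PHB3_PCI_CFG_READ(16, u16) above expands to a
+ * function of the form:
+ *
+ *	static int64_t phb3_pcicfg_read16(struct phb *phb, uint32_t bdfn,
+ *					  uint32_t offset, u16 *data);
+ *
+ * which is the shape used by callers below such as
+ * phb3_presence_detect() and phb3_read_phb_status().
+ */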
+
+static uint8_t phb3_choose_bus(struct phb *phb __unused,
+ struct pci_device *bridge __unused,
+ uint8_t candidate, uint8_t *max_bus __unused,
+ bool *use_max)
+{
+ /* Use standard bus number selection */
+ *use_max = false;
+ return candidate;
+}
+
+static void phb3_root_port_init(struct phb *phb, struct pci_device *dev,
+ int ecap, int aercap)
+{
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_SERR_EN | PCI_CFG_CMD_PERR_RESP);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+ val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT |
+ PCICAP_EXP_DEVCTL_UR_REPORT);
+ pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+ if (!aercap) return;
+
+ /* Mask various unrecoverable errors */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, &val32);
+ val32 |= (PCIECAP_AER_UE_MASK_POISON_TLP |
+ PCIECAP_AER_UE_MASK_COMPL_TIMEOUT |
+ PCIECAP_AER_UE_MASK_COMPL_ABORT |
+ PCIECAP_AER_UE_MASK_ECRC);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, val32);
+
+ /* Report various unrecoverable errors as fatal errors */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, &val32);
+ val32 |= (PCIECAP_AER_UE_SEVERITY_DLLP |
+ PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
+ PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_UNEXP_COMPL |
+ PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
+ PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
+
+ /* Mask various recoverable errors */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, &val32);
+ val32 |= PCIECAP_AER_CE_MASK_ADV_NONFATAL;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
+
+ /* Enable ECRC check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+
+ /* Enable all error reporting */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, &val32);
+ val32 |= (PCIECAP_AER_RERR_CMD_FE |
+ PCIECAP_AER_RERR_CMD_NFE |
+ PCIECAP_AER_RERR_CMD_CE);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_RERR_CMD, val32);
+}
+
+static void phb3_switch_port_init(struct phb *phb,
+ struct pci_device *dev,
+ int ecap, int aercap)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking and disable INTx */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_SERR_EN |
+ PCI_CFG_CMD_INTx_DIS);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+	/* Disable parity error response and enable system error */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_BRCTL, &val16);
+ val16 &= ~PCI_CFG_BRCTL_PERR_RESP_EN;
+ val16 |= PCI_CFG_BRCTL_SERR_EN;
+ pci_cfg_write16(phb, bdfn, PCI_CFG_BRCTL, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+ pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+ val16 |= (PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT);
+ /* HW279570 - Disable reporting of correctable errors */
+ val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
+ pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+ /* Unmask all unrecoverable errors */
+ if (!aercap) return;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_MASK, 0x0);
+
+ /* Severity of unrecoverable errors */
+ if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT)
+ val32 = (PCIECAP_AER_UE_SEVERITY_DLLP |
+ PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN |
+ PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW |
+ PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP |
+ PCIECAP_AER_UE_SEVERITY_INTERNAL);
+ else
+ val32 = (PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_SEVERITY_INTERNAL);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_UE_SEVERITY, val32);
+
+ /*
+ * Mask various correctable errors
+ *
+ * On Murano and Venice DD1.0 we disable emission of corrected
+ * error messages to the PHB completely to workaround errata
+ * HW257476 causing the loss of tags.
+ */
+ if (p->rev < PHB3_REV_MURANO_DD20)
+ val32 = 0xffffffff;
+ else
+ val32 = PCIECAP_AER_CE_MASK_ADV_NONFATAL;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK, val32);
+
+ /* Enable ECRC generation and disable ECRC check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= PCIECAP_AER_CAPCTL_ECRCG_EN;
+ val32 &= ~PCIECAP_AER_CAPCTL_ECRCC_EN;
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+}
+
+static void phb3_endpoint_init(struct phb *phb,
+ struct pci_device *dev,
+ int ecap, int aercap)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint16_t bdfn = dev->bdfn;
+ uint16_t val16;
+ uint32_t val32;
+
+ /* Enable SERR and parity checking */
+ pci_cfg_read16(phb, bdfn, PCI_CFG_CMD, &val16);
+ val16 |= (PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_SERR_EN);
+ pci_cfg_write16(phb, bdfn, PCI_CFG_CMD, val16);
+
+ /* Enable reporting various errors */
+ if (!ecap) return;
+	pci_cfg_read16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, &val16);
+	val16 |= (PCICAP_EXP_DEVCTL_NFE_REPORT |
+		  PCICAP_EXP_DEVCTL_FE_REPORT |
+		  PCICAP_EXP_DEVCTL_UR_REPORT);
+	/* HW279570 - Disable reporting of correctable errors */
+	val16 &= ~PCICAP_EXP_DEVCTL_CE_REPORT;
+	pci_cfg_write16(phb, bdfn, ecap + PCICAP_EXP_DEVCTL, val16);
+
+	/* The AER settings below need a valid capability offset */
+	if (!aercap) return;
+
+	/*
+	 * On Murano and Venice DD1.0 we disable emission of corrected
+	 * error messages to the PHB completely to workaround errata
+	 * HW257476 causing the loss of tags.
+	 */
+	if (p->rev < PHB3_REV_MURANO_DD20)
+		pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CE_MASK,
+				0xffffffff);
+
+ /* Enable ECRC generation and check */
+ pci_cfg_read32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, &val32);
+ val32 |= (PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ pci_cfg_write32(phb, bdfn, aercap + PCIECAP_AER_CAPCTL, val32);
+}
+
+static void phb3_device_init(struct phb *phb, struct pci_device *dev)
+{
+ int ecap = 0;
+ int aercap = 0;
+
+ /* Figure out PCIe & AER capability */
+ if (pci_has_cap(dev, PCI_CFG_CAP_ID_EXP, false)) {
+ ecap = pci_cap(dev, PCI_CFG_CAP_ID_EXP, false);
+
+ if (!pci_has_cap(dev, PCIECAP_ID_AER, true)) {
+ aercap = pci_find_ecap(phb, dev->bdfn,
+ PCIECAP_ID_AER, NULL);
+ if (aercap > 0)
+ pci_set_cap(dev, PCIECAP_ID_AER, aercap, true);
+ } else {
+ aercap = pci_cap(dev, PCIECAP_ID_AER, true);
+ }
+ }
+
+ /* Reconfigure the MPS */
+ pci_configure_mps(phb, dev);
+
+ if (dev->dev_type == PCIE_TYPE_ROOT_PORT)
+ phb3_root_port_init(phb, dev, ecap, aercap);
+ else if (dev->dev_type == PCIE_TYPE_SWITCH_UPPORT ||
+ dev->dev_type == PCIE_TYPE_SWITCH_DNPORT)
+ phb3_switch_port_init(phb, dev, ecap, aercap);
+ else
+ phb3_endpoint_init(phb, dev, ecap, aercap);
+}
+
+static int64_t phb3_pci_reinit(struct phb *phb, uint64_t scope, uint64_t data)
+{
+ struct pci_device *pd;
+ uint16_t bdfn = data;
+
+ if (scope != OPAL_REINIT_PCI_DEV)
+ return OPAL_PARAMETER;
+
+ pd = pci_find_dev(phb, bdfn);
+ if (!pd)
+ return OPAL_PARAMETER;
+
+ phb3_device_init(phb, pd);
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_presence_detect(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint16_t slot_stat;
+ uint64_t hp_override;
+ int64_t rc;
+
+ /* Test for PHB in error state ? */
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ /* XXX Check bifurcation stuff ? */
+
+ /* Read slot status register */
+ rc = phb3_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTSTAT,
+ &slot_stat);
+ if (rc != OPAL_SUCCESS)
+ return OPAL_HARDWARE;
+
+ /* Read hotplug override */
+ hp_override = in_be64(p->regs + PHB_HOTPLUG_OVERRIDE);
+
+ printf("PHB%d: slot_stat: 0x%04x, hp_override: 0x%016llx\n",
+ phb->opal_id, slot_stat, hp_override);
+
+ /* So if the slot status says nothing connected, we bail out */
+ if (!(slot_stat & PCICAP_EXP_SLOTSTAT_PDETECTST))
+ return OPAL_SHPC_DEV_NOT_PRESENT;
+
+ /*
+ * At this point, we can have one of those funky IBM
+ * systems that has the presence bit set in the slot
+ * status and nothing actually connected. If so, we
+ * check the hotplug override A/B bits
+ */
+ if (p->use_ab_detect &&
+ (hp_override & PHB_HPOVR_PRESENCE_A) &&
+ (hp_override & PHB_HPOVR_PRESENCE_B))
+ return OPAL_SHPC_DEV_NOT_PRESENT;
+
+	/*
+	 * Otherwise, we assume a device is present; the link state
+	 * machine will bail out early if no electrical signaling is
+	 * established within a second.
+	 */
+ return OPAL_SHPC_DEV_PRESENT;
+}
+
+/* Clear IODA cache tables */
+static void phb3_init_ioda_cache(struct phb3 *p)
+{
+ uint32_t i;
+ uint64_t *data64;
+
+ /*
+ * RTT and PELTV. RTE should be 0xFF's to indicate
+ * invalid PE# for the corresponding RID.
+ *
+ * Note: Instead we set all RTE entries to 0x00 to
+ * work around a problem where PE lookups might be
+ * done before Linux has established valid PE's
+ * (during PCI probing). We can revisit that once/if
+ * Linux has been fixed to always setup valid PEs.
+ *
+ * The value 0x00 corresponds to the default PE# Linux
+ * uses to check for config space freezes before it
+ * has assigned PE# to busses.
+ *
+	 * WARNING: Additionally, we need to be careful: there's a
+	 * HW issue where, if we get an MSI on an RTT entry that is
+	 * FF, things will go bad. We need to ensure we never leave
+	 * a live FF RTT entry, even temporarily, when resetting
+	 * for EEH etc... (HW278969).
+ */
+ memset(p->rte_cache, 0x00, RTT_TABLE_SIZE);
+ memset(p->peltv_cache, 0x0, sizeof(p->peltv_cache));
+
+ /* Disable all LSI */
+ for (i = 0; i < ARRAY_SIZE(p->lxive_cache); i++) {
+ data64 = &p->lxive_cache[i];
+ *data64 = SETFIELD(IODA2_LXIVT_PRIORITY, 0ul, 0xff);
+ *data64 = SETFIELD(IODA2_LXIVT_SERVER, *data64, 0x0);
+ }
+
+	/* Disable all MSIs */
+ for (i = 0; i < ARRAY_SIZE(p->ive_cache); i++) {
+ data64 = &p->ive_cache[i];
+ *data64 = SETFIELD(IODA2_IVT_PRIORITY, 0ul, 0xff);
+ *data64 = SETFIELD(IODA2_IVT_SERVER, *data64, 0x0);
+ }
+
+ /* Clear TVT */
+ memset(p->tve_cache, 0x0, sizeof(p->tve_cache));
+ /* Clear M32 domain */
+ memset(p->m32d_cache, 0x0, sizeof(p->m32d_cache));
+ /* Clear M64 domain */
+ memset(p->m64b_cache, 0x0, sizeof(p->m64b_cache));
+}
+
+/* phb3_ioda_reset - Reset the IODA tables
+ *
+ * @purge: If true, the cache is cleared and the cleared values
+ * are applied to HW. If false, the cached values are
+ * applied to HW
+ *
+ * This resets the IODA tables in the PHB. It is called at
+ * initialization time, on PHB reset, and can be called
+ * explicitly from OPAL
+ */
+static int64_t phb3_ioda_reset(struct phb *phb, bool purge)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t server, prio;
+ uint64_t *pdata64, data64;
+ uint32_t i;
+
+ if (purge) {
+ printf("PHB%d: Purging all IODA tables...\n", p->phb.opal_id);
+ phb3_init_ioda_cache(p);
+ }
+
+ /* Init_27..28 - LIXVT */
+ phb3_ioda_sel(p, IODA2_TBL_LXIVT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->lxive_cache); i++) {
+ data64 = p->lxive_cache[i];
+ server = GETFIELD(IODA2_LXIVT_SERVER, data64);
+ prio = GETFIELD(IODA2_LXIVT_PRIORITY, data64);
+ data64 = SETFIELD(IODA2_LXIVT_SERVER, data64, server);
+ data64 = SETFIELD(IODA2_LXIVT_PRIORITY, data64, prio);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ }
+
+ /* Init_29..30 - MRT */
+ phb3_ioda_sel(p, IODA2_TBL_MRT, 0, true);
+ for (i = 0; i < 8; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ /* Init_31..32 - TVT */
+ phb3_ioda_sel(p, IODA2_TBL_TVT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->tve_cache); i++)
+ out_be64(p->regs + PHB_IODA_DATA0, p->tve_cache[i]);
+
+ /* Init_33..34 - M64BT */
+ phb3_ioda_sel(p, IODA2_TBL_M64BT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->m64b_cache); i++)
+ out_be64(p->regs + PHB_IODA_DATA0, p->m64b_cache[i]);
+
+ /* Init_35..36 - M32DT */
+ phb3_ioda_sel(p, IODA2_TBL_M32DT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->m32d_cache); i++)
+ out_be64(p->regs + PHB_IODA_DATA0, p->m32d_cache[i]);
+
+ /* Load RTE, PELTV */
+ if (p->tbl_rtt)
+ memcpy((void *)p->tbl_rtt, p->rte_cache, RTT_TABLE_SIZE);
+ if (p->tbl_peltv)
+ memcpy((void *)p->tbl_peltv, p->peltv_cache, PELTV_TABLE_SIZE);
+
+ /* Load IVT */
+ if (p->tbl_ivt) {
+ pdata64 = (uint64_t *)p->tbl_ivt;
+ for (i = 0; i < IVT_TABLE_ENTRIES; i++)
+ pdata64[i * IVT_TABLE_STRIDE] = p->ive_cache[i];
+ }
+
+ /* Invalidate RTE, IVE, TCE cache */
+ out_be64(p->regs + PHB_RTC_INVALIDATE, PHB_RTC_INVALIDATE_ALL);
+ out_be64(p->regs + PHB_IVC_INVALIDATE, PHB_IVC_INVALIDATE_ALL);
+ out_be64(p->regs + PHB_TCE_KILL, PHB_TCE_KILL_ALL);
+
+ /* Clear RBA */
+ if (p->rev >= PHB3_REV_MURANO_DD20) {
+ phb3_ioda_sel(p, IODA2_TBL_RBA, 0, true);
+ for (i = 0; i < 32; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0x0ul);
+ }
+
+ /* Clear PEST & PEEV */
+ for (i = 0; i < PHB3_MAX_PE_NUM; i++) {
+ uint64_t pesta, pestb;
+
+ phb3_ioda_sel(p, IODA2_TBL_PESTA, i, false);
+ pesta = in_be64(p->regs + PHB_IODA_DATA0);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ phb3_ioda_sel(p, IODA2_TBL_PESTB, i, false);
+ pestb = in_be64(p->regs + PHB_IODA_DATA0);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ if ((pesta & IODA2_PESTA_MMIO_FROZEN) ||
+ (pestb & IODA2_PESTB_DMA_STOPPED))
+ PHBDBG(p, "Frozen PE#%d (%s - %s)\n",
+ i, (pesta & IODA2_PESTA_MMIO_FROZEN) ? "DMA" : "",
+ (pestb & IODA2_PESTB_DMA_STOPPED) ? "MMIO" : "");
+ }
+
+ phb3_ioda_sel(p, IODA2_TBL_PEEV, 0, true);
+ for (i = 0; i < 4; i++)
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_set_phb_mem_window(struct phb *phb,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint64_t addr,
+				       uint64_t pci_addr __unused,
+ uint64_t size)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t data64;
+
+ /*
+ * By design, PHB3 doesn't support IODT any more.
+ * Besides, we can't enable M32 BAR as well. So
+ * the function is used to do M64 mapping and each
+ * BAR is supposed to be shared by all PEs.
+ */
+ switch (window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ case OPAL_M32_WINDOW_TYPE:
+ return OPAL_UNSUPPORTED;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num >= 16)
+ return OPAL_PARAMETER;
+
+ data64 = p->m64b_cache[window_num];
+ if (data64 & IODA2_M64BT_SINGLE_PE) {
+ if ((addr & 0x1FFFFFFul) ||
+ (size & 0x1FFFFFFul))
+ return OPAL_PARAMETER;
+ } else {
+ if ((addr & 0xFFFFFul) ||
+ (size & 0xFFFFFul))
+ return OPAL_PARAMETER;
+ }
+
+ /* size should be 2^N */
+ if (!size || size & (size-1))
+ return OPAL_PARAMETER;
+
+ /* address should be size aligned */
+ if (addr & (size - 1))
+ return OPAL_PARAMETER;
+
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ if (data64 & IODA2_M64BT_SINGLE_PE) {
+ data64 = SETFIELD(IODA2_M64BT_SINGLE_BASE, data64,
+ addr >> 25);
+ data64 = SETFIELD(IODA2_M64BT_SINGLE_MASK, data64,
+ 0x20000000 - (size >> 25));
+ } else {
+ data64 = SETFIELD(IODA2_M64BT_BASE, data64,
+ addr >> 20);
+ data64 = SETFIELD(IODA2_M64BT_MASK, data64,
+ 0x40000000 - (size >> 20));
+ }
+ p->m64b_cache[window_num] = data64;
+
+ return OPAL_SUCCESS;
+}
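+
+/*
+ * A worked example of the shared (non single-PE) encoding above:
+ * a 256MB window gives size >> 20 = 0x100, so the mask field is
+ * 0x40000000 - 0x100 = 0x3fffff00, i.e. the low 8 bits of the 1MB
+ * segment number become don't-care, spanning the whole window at
+ * 1MB granularity.
+ */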
+
+/*
+ * For one specific M64 BAR, it can be shared by all PEs,
+ * or owned by single PE exclusively.
+ */
+static int64_t phb3_phb_mmio_enable(struct phb *phb,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t enable)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t data64, base, mask;
+
+ /*
+ * By design, PHB3 doesn't support IODT any more.
+ * Besides, we can't enable M32 BAR as well. So
+ * the function is used to do M64 mapping and each
+ * BAR is supposed to be shared by all PEs.
+ */
+ switch (window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ case OPAL_M32_WINDOW_TYPE:
+ return OPAL_UNSUPPORTED;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num >= 16 ||
+ enable > OPAL_ENABLE_M64_NON_SPLIT)
+ return OPAL_PARAMETER;
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+	/*
+	 * We need to check the base/mask while enabling the M64
+	 * BAR; otherwise an invalid base/mask might unintentionally
+	 * fence the AIB.
+	 */
+ data64 = p->m64b_cache[window_num];
+ switch (enable) {
+ case OPAL_DISABLE_M64:
+ data64 &= ~IODA2_M64BT_SINGLE_PE;
+ data64 &= ~IODA2_M64BT_ENABLE;
+ break;
+ case OPAL_ENABLE_M64_SPLIT:
+ if (data64 & IODA2_M64BT_SINGLE_PE)
+ return OPAL_PARAMETER;
+ base = GETFIELD(IODA2_M64BT_BASE, data64);
+ base = (base << 20);
+ mask = GETFIELD(IODA2_M64BT_MASK, data64);
+ if (base < p->mm0_base || !mask)
+ return OPAL_PARTIAL;
+
+ data64 |= IODA2_M64BT_ENABLE;
+ break;
+ case OPAL_ENABLE_M64_NON_SPLIT:
+ if (!(data64 & IODA2_M64BT_SINGLE_PE))
+ return OPAL_PARAMETER;
+ base = GETFIELD(IODA2_M64BT_SINGLE_BASE, data64);
+ base = (base << 25);
+ mask = GETFIELD(IODA2_M64BT_SINGLE_MASK, data64);
+ if (base < p->mm0_base || !mask)
+ return OPAL_PARTIAL;
+
+ data64 |= IODA2_M64BT_SINGLE_PE;
+ data64 |= IODA2_M64BT_ENABLE;
+ break;
+ }
+
+ /* Update HW and cache */
+ phb3_ioda_sel(p, IODA2_TBL_M64BT, window_num, false);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ p->m64b_cache[window_num] = data64;
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_map_pe_mmio_window(struct phb *phb,
+ uint16_t pe_num,
+ uint16_t window_type,
+ uint16_t window_num,
+ uint16_t segment_num)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t data64, *cache;
+
+ if (pe_num >= PHB3_MAX_PE_NUM)
+ return OPAL_PARAMETER;
+
+	/*
+	 * PHB3 doesn't support IODT any more. On the other hand,
+	 * PHB3 supports M64DT with much more flexibility; we need
+	 * to figure that out later. At least, the kernel never
+	 * uses M64DT.
+	 */
+ switch(window_type) {
+ case OPAL_IO_WINDOW_TYPE:
+ return OPAL_UNSUPPORTED;
+ case OPAL_M32_WINDOW_TYPE:
+ if (window_num != 0 || segment_num >= PHB3_MAX_PE_NUM)
+ return OPAL_PARAMETER;
+
+ cache = &p->m32d_cache[segment_num];
+ phb3_ioda_sel(p, IODA2_TBL_M32DT, segment_num, false);
+ out_be64(p->regs + PHB_IODA_DATA0,
+ SETFIELD(IODA2_M32DT_PE, 0ull, pe_num));
+ *cache = SETFIELD(IODA2_M32DT_PE, 0ull, pe_num);
+
+ break;
+ case OPAL_M64_WINDOW_TYPE:
+ if (window_num >= 16)
+ return OPAL_PARAMETER;
+ cache = &p->m64b_cache[window_num];
+ data64 = *cache;
+
+ /* The BAR shouldn't be enabled yet */
+ if (data64 & IODA2_M64BT_ENABLE)
+ return OPAL_PARTIAL;
+
+ data64 |= IODA2_M64BT_SINGLE_PE;
+ data64 = SETFIELD(IODA2_M64BT_PE_HI, data64, pe_num >> 5);
+ data64 = SETFIELD(IODA2_M64BT_PE_LOW, data64, pe_num);
+ *cache = data64;
+
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_map_pe_dma_window(struct phb *phb,
+ uint16_t pe_num,
+ uint16_t window_id,
+ uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t tts_encoded;
+ uint64_t data64 = 0;
+
+	/*
+	 * Sanity check. We currently only support the "2 windows per PE"
+	 * mode, i.e. only bit 59 of the PCI address is used to select
+	 * the window.
+	 */
+ if (pe_num >= PHB3_MAX_PE_NUM ||
+ (window_id >> 1) != pe_num)
+ return OPAL_PARAMETER;
+
+ /*
+ * tce_table_size == 0 is used to disable an entry, in this case
+ * we ignore other arguments
+ */
+ if (tce_table_size == 0) {
+ phb3_ioda_sel(p, IODA2_TBL_TVT, window_id, false);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ p->tve_cache[window_id] = 0;
+ return OPAL_SUCCESS;
+ }
+
+ /* Additional arguments validation */
+ if (tce_levels < 1 || tce_levels > 5 ||
+ !is_pow2(tce_table_size) ||
+ tce_table_size < 0x1000)
+ return OPAL_PARAMETER;
+
+ /* Encode TCE table size */
+ data64 = SETFIELD(IODA2_TVT_TABLE_ADDR, 0ul, tce_table_addr >> 12);
+ tts_encoded = ilog2(tce_table_size) - 11;
+ if (tts_encoded > 31)
+ return OPAL_PARAMETER;
+ data64 = SETFIELD(IODA2_TVT_TCE_TABLE_SIZE, data64, tts_encoded);
+
+ /* Encode TCE page size */
+ switch (tce_page_size) {
+ case 0x1000: /* 4K */
+ data64 = SETFIELD(IODA2_TVT_IO_PSIZE, data64, 1);
+ break;
+ case 0x10000: /* 64K */
+ data64 = SETFIELD(IODA2_TVT_IO_PSIZE, data64, 5);
+ break;
+ case 0x1000000: /* 16M */
+ data64 = SETFIELD(IODA2_TVT_IO_PSIZE, data64, 13);
+ break;
+ case 0x10000000: /* 256M */
+ data64 = SETFIELD(IODA2_TVT_IO_PSIZE, data64, 17);
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ /* Encode number of levels */
+ data64 = SETFIELD(IODA2_TVT_NUM_LEVELS, data64, tce_levels - 1);
+
+ phb3_ioda_sel(p, IODA2_TBL_TVT, window_id, false);
+ out_be64(p->regs + PHB_IODA_DATA0, data64);
+ p->tve_cache[window_id] = data64;
+
+ return OPAL_SUCCESS;
+}
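+
+/*
+ * Illustrative example of the encoding above: a single-level 64KB
+ * TCE table (tce_table_size = 0x10000) with 4KB IO pages yields
+ * tts_encoded = ilog2(0x10000) - 11 = 5, an IO page size code of 1
+ * and a levels field of tce_levels - 1 = 0.
+ */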
+
+static int64_t phb3_map_pe_dma_window_real(struct phb *phb,
+ uint16_t pe_num,
+ uint16_t window_id,
+ uint64_t pci_start_addr,
+ uint64_t pci_mem_size)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t end = pci_start_addr + pci_mem_size;
+ uint64_t tve;
+
+ if (pe_num >= PHB3_MAX_PE_NUM ||
+ (window_id >> 1) != pe_num)
+ return OPAL_PARAMETER;
+
+ if (pci_mem_size) {
+ /* Enable */
+
+ /*
+ * Check that the start address has the right TVE index,
+ * we only support the 1 bit mode where each PE has 2
+ * TVEs
+ */
+ if ((pci_start_addr >> 59) != (window_id & 1))
+ return OPAL_PARAMETER;
+ pci_start_addr &= ((1ull << 59) - 1);
+ end = pci_start_addr + pci_mem_size;
+
+ /* We have to be 16M aligned */
+ if ((pci_start_addr & 0x00ffffff) ||
+ (pci_mem_size & 0x00ffffff))
+ return OPAL_PARAMETER;
+
+		/*
+		 * It *looks* like this is the max we can support (we need
+		 * to verify this). Also we are not checking for rollover,
+		 * but then we aren't trying too hard to protect ourselves
+		 * against a completely broken OS.
+		 */
+ if (end > 0x0003ffffffffffffull)
+ return OPAL_PARAMETER;
+
+ /*
+ * Put start address bits 49:24 into TVE[52:53]||[0:23]
+ * and end address bits 49:24 into TVE[54:55]||[24:47]
+ * and set TVE[51]
+ */
+		tve  = (pci_start_addr << 16) & (0xffffffull << 40);
+ tve |= (pci_start_addr >> 38) & (3ull << 10);
+ tve |= (end >> 8) & (0xfffffful << 16);
+ tve |= (end >> 40) & (3ull << 8);
+ tve |= PPC_BIT(51);
+ } else {
+ /* Disable */
+ tve = 0;
+ }
+
+ phb3_ioda_sel(p, IODA2_TBL_TVT, window_id, false);
+ out_be64(p->regs + PHB_IODA_DATA0, tve);
+ p->tve_cache[window_id] = tve;
+
+ return OPAL_SUCCESS;
+}
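+
+/*
+ * A worked example of the bypass TVE packing above (assuming the
+ * 0xffffffull << 40 mask): pci_start_addr = 0x1000000 (16M, hence
+ * an even window_id) and pci_mem_size = 0x1000000 give
+ * end = 0x2000000, so tve = bit 40 (from start << 16) | bit 17
+ * (from end >> 8) | PPC_BIT(51) = 0x0000010000021000.
+ */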
+
+static void phb3_pci_msi_check_q(struct phb3 *p, uint32_t ive_num)
+{
+ uint64_t ive, ivc, ffi;
+ uint8_t *q_byte;
+
+	/* Each IVE is 16 or 128 bytes, i.e. IVT_TABLE_STRIDE 64-bit words */
+ ive = p->tbl_ivt + (ive_num * IVT_TABLE_STRIDE * 8);
+ q_byte = (uint8_t *)(ive + 5);
+
+	/*
+	 * Handle the Q bit. If the Q bit isn't visible in memory
+	 * yet, do a cache-inhibited load from the PHB to force the
+	 * IVC to flush it out.
+	 */
+ if (!(*q_byte & 0x1)) {
+ /* Read from random PHB reg to force flush */
+ in_be64(p->regs + PHB_IVC_UPDATE);
+
+ /* Order with subsequent read of Q */
+ sync();
+
+ /* Q still not set, bail out */
+ if (!(*q_byte & 0x1))
+ return;
+ }
+
+ /* Lock FFI and send interrupt */
+ while (in_be64(p->regs + PHB_FFI_LOCK))
+ /* XXX Handle fences ! */
+ ;
+
+ /* Clear Q bit and update IVC */
+ *q_byte = 0;
+ ivc = SETFIELD(PHB_IVC_UPDATE_SID, 0ul, ive_num) |
+ PHB_IVC_UPDATE_ENABLE_Q;
+ out_be64(p->regs + PHB_IVC_UPDATE, ivc);
+
+	/*
+	 * Resend the interrupt. Note that the lock clear bit isn't
+	 * documented in the PHB3 spec and is thus probably
+	 * unnecessary, but it's in IODA2, so let's be safe here;
+	 * it won't hurt to set it.
+	 */
+ ffi = SETFIELD(PHB_FFI_REQUEST_ISN, 0ul, ive_num) | PHB_FFI_LOCK_CLEAR;
+ out_be64(p->regs + PHB_FFI_REQUEST, ffi);
+}
+
+static int64_t phb3_pci_msi_eoi(struct phb *phb,
+ uint32_t hwirq)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint32_t ive_num = PHB3_IRQ_NUM(hwirq);
+ uint64_t ive, ivc;
+ uint8_t *p_byte, gp, gen;
+
+ /* OS might not configure IVT yet */
+ if (!p->tbl_ivt)
+ return OPAL_HARDWARE;
+
+	/* Each IVE is 16 or 128 bytes, i.e. IVT_TABLE_STRIDE 64-bit words */
+ ive = p->tbl_ivt + (ive_num * IVT_TABLE_STRIDE * 8);
+ p_byte = (uint8_t *)(ive + 4);
+
+ /* Read generation and P */
+ gp = *p_byte;
+ gen = gp >> 1;
+
+ /* Increment generation count and clear P */
+ *p_byte = ((gen + 1) << 1) & 0x7;
+
+ /* Update the IVC with a match against the old gen count */
+ ivc = SETFIELD(PHB_IVC_UPDATE_SID, 0ul, ive_num) |
+ PHB_IVC_UPDATE_ENABLE_P |
+ PHB_IVC_UPDATE_ENABLE_GEN |
+ SETFIELD(PHB_IVC_UPDATE_GEN_MATCH, 0ul, gen);
+ out_be64(p->regs + PHB_IVC_UPDATE, ivc);
+
+ /* Handle Q bit */
+ phb3_pci_msi_check_q(p, ive_num);
+
+ return OPAL_SUCCESS;
+}
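+
+/*
+ * Byte layout assumed by the EOI code above: the byte at IVE+4 holds
+ * the 2-bit generation count in bits 2:1 and P in bit 0. For example
+ * gp = 0x5 means gen = 2 with P set, and the EOI rewrites the byte
+ * to ((2 + 1) << 1) & 0x7 = 0x6, i.e. gen = 3 with P cleared.
+ */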
+
+static int64_t phb3_set_ive_pe(struct phb *phb,
+ uint32_t pe_num,
+ uint32_t ive_num)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t *cache, ivep, data64;
+ uint16_t *pe_word;
+
+ /* OS should enable the BAR in advance */
+ if (!p->tbl_ivt)
+ return OPAL_HARDWARE;
+
+ /* Each IVE reserves 128 bytes */
+ if (pe_num >= PHB3_MAX_PE_NUM ||
+ ive_num >= IVT_TABLE_ENTRIES)
+ return OPAL_PARAMETER;
+
+ /* Update IVE cache */
+ cache = &p->ive_cache[ive_num];
+ *cache = SETFIELD(IODA2_IVT_PE, *cache, pe_num);
+
+ /* Update in-memory IVE without clobbering P and Q */
+ ivep = p->tbl_ivt + (ive_num * IVT_TABLE_STRIDE * 8);
+ pe_word = (uint16_t *)(ivep + 6);
+ *pe_word = pe_num;
+
+ /* Invalidate IVC */
+ data64 = SETFIELD(PHB_IVC_INVALIDATE_SID, 0ul, ive_num);
+ out_be64(p->regs + PHB_IVC_INVALIDATE, data64);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_get_msi_32(struct phb *phb __unused,
+ uint32_t pe_num,
+ uint32_t ive_num,
+ uint8_t msi_range,
+ uint32_t *msi_address,
+ uint32_t *message_data)
+{
+	/*
+	 * Sanity check. We needn't check the MVE number (PE#) on
+	 * PHB3 since the interrupt source is purely determined by
+	 * its DMA address and data, but the check isn't harmful.
+	 */
+	if (pe_num >= PHB3_MAX_PE_NUM ||
+	    ive_num >= IVT_TABLE_ENTRIES ||
+	    msi_range != 1 || !msi_address || !message_data)
+		return OPAL_PARAMETER;
+
+ /*
+ * DMA address and data will form the IVE index.
+ * For more details, please refer to IODA2 spec.
+ */
+ *msi_address = 0xFFFF0000 | ((ive_num << 4) & 0xFFFFFE0F);
+ *message_data = ive_num & 0x1F;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_get_msi_64(struct phb *phb __unused,
+ uint32_t pe_num,
+ uint32_t ive_num,
+ uint8_t msi_range,
+ uint64_t *msi_address,
+ uint32_t *message_data)
+{
+ /* Sanity check */
+ if (pe_num >= PHB3_MAX_PE_NUM ||
+ ive_num >= IVT_TABLE_ENTRIES ||
+ msi_range != 1 || !msi_address || !message_data)
+ return OPAL_PARAMETER;
+
+ /*
+ * DMA address and data will form the IVE index.
+ * For more details, please refer to IODA2 spec.
+ */
+ *msi_address = (0x1ul << 60) | ((ive_num << 4) & 0xFFFFFFFFFFFFFE0Ful);
+ *message_data = ive_num & 0x1F;
+
+ return OPAL_SUCCESS;
+}
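+
+/*
+ * Illustrative example of the mapping above: for ive_num = 0x123 the
+ * 64-bit variant returns msi_address = (1ul << 60) | 0x1200 and
+ * message_data = 0x03; the PHB recombines the address bits with the
+ * 5 low data bits to recover IVE index 0x123.
+ */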
+
+static bool phb3_err_check_pbcq(struct phb3 *p)
+{
+ uint64_t nfir, mask, wof, val64;
+ int32_t class, bit;
+ uint64_t severity[PHB3_ERR_CLASS_LAST] = {
+ 0x0000000000000000, /* NONE */
+ 0x018000F800000000, /* DEAD */
+ 0x7E7DC70000000000, /* FENCED */
+ 0x0000000000000000, /* ER */
+ 0x0000000000000000 /* INF */
+ };
+
+	/*
+	 * Read the NFIR to see if XSCOM is working properly. If
+	 * XSCOM is dead, there's no point in looking at the PHB
+	 * any further, so mark it dead as well.
+	 */
+ xscom_read(p->chip_id, p->pe_xscom + 0x0, &nfir);
+ if (nfir == 0xffffffffffffffff) {
+ p->err.err_src = PHB3_ERR_SRC_NONE;
+ p->err.err_class = PHB3_ERR_CLASS_DEAD;
+ phb3_set_err_pending(p, true);
+ return true;
+ }
+
+	/*
+	 * Check the WOF; we need to handle unmasked errors first.
+	 * We can run into a situation (on the simulator) where we
+	 * have asserted FIR bits but WOF has nothing. For that
+	 * case, we should check the FIR as well.
+	 */
+ xscom_read(p->chip_id, p->pe_xscom + 0x3, &mask);
+ xscom_read(p->chip_id, p->pe_xscom + 0x8, &wof);
+ if (wof & ~mask)
+ wof &= ~mask;
+ if (!wof) {
+ if (nfir & ~mask)
+ nfir &= ~mask;
+ if (!nfir)
+ return false;
+ wof = nfir;
+ }
+
+ /* We shouldn't hit class PHB3_ERR_CLASS_NONE */
+ for (class = PHB3_ERR_CLASS_NONE;
+ class < PHB3_ERR_CLASS_LAST;
+ class++) {
+ val64 = wof & severity[class];
+ if (!val64)
+ continue;
+
+ for (bit = 0; bit < 64; bit++) {
+ if (val64 & PPC_BIT(bit)) {
+ p->err.err_src = PHB3_ERR_SRC_PBCQ;
+ p->err.err_class = class;
+ p->err.err_bit = 63 - bit;
+ phb3_set_err_pending(p, true);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
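+
+/*
+ * Note on the walk above: classes are scanned in ascending order
+ * (DEAD, then FENCED, ER, INF), so when several classes have bits
+ * set the most severe one wins. The loop indexes bits in IBM
+ * numbering via PPC_BIT (bit 0 = MSB); err_bit = 63 - bit converts
+ * the match to conventional LSB = 0 numbering.
+ */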
+
+static bool phb3_err_check_lem(struct phb3 *p)
+{
+ uint64_t fir, wof, mask, val64;
+ int32_t class, bit;
+ uint64_t severity[PHB3_ERR_CLASS_LAST] = {
+ 0x0000000000000000, /* NONE */
+ 0x0000000000000000, /* DEAD */
+ 0xADB670C980ADD151, /* FENCED */
+ 0x000800107F500A2C, /* ER */
+ 0x42018E2200002482 /* INF */
+ };
+
+	/*
+	 * Read the FIR. If XSCOM or ASB is frozen, we needn't go
+	 * any further and just mark the PHB as dead.
+	 */
+ fir = phb3_read_reg_asb(p, PHB_LEM_FIR_ACCUM);
+ if (fir == 0xffffffffffffffff) {
+ p->err.err_src = PHB3_ERR_SRC_PHB;
+ p->err.err_class = PHB3_ERR_CLASS_DEAD;
+ phb3_set_err_pending(p, true);
+ return true;
+ }
+
+	/*
+	 * Check the WOF for unmasked errors first. In some
+	 * situations when running skiboot on the simulator, FIR
+	 * bits are already asserted while WOF is still zero. For
+	 * that case, we check the FIR directly.
+	 */
+ wof = phb3_read_reg_asb(p, PHB_LEM_WOF);
+ mask = phb3_read_reg_asb(p, PHB_LEM_ERROR_MASK);
+ if (wof & ~mask)
+ wof &= ~mask;
+ if (!wof) {
+ if (fir & ~mask)
+ fir &= ~mask;
+ if (!fir)
+ return false;
+ wof = fir;
+ }
+
+ /* We shouldn't hit PHB3_ERR_CLASS_NONE */
+ for (class = PHB3_ERR_CLASS_NONE;
+ class < PHB3_ERR_CLASS_LAST;
+ class++) {
+ val64 = wof & severity[class];
+ if (!val64)
+ continue;
+
+ for (bit = 0; bit < 64; bit++) {
+ if (val64 & PPC_BIT(bit)) {
+ p->err.err_src = PHB3_ERR_SRC_PHB;
+ p->err.err_class = class;
+ p->err.err_bit = 63 - bit;
+ phb3_set_err_pending(p, true);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/*
+ * The function can be called during error recovery for INF
+ * and ER class. For INF case, it's expected to be called
+ * when grabbing the error log. We will call it explicitly
+ * when clearing frozen PE state for ER case.
+ */
+static void phb3_err_ER_clear(struct phb3 *p)
+{
+ uint32_t val32;
+ uint64_t val64;
+ uint64_t fir = in_be64(p->regs + PHB_LEM_FIR_ACCUM);
+
+ /* Rec 1: Grab the PCI config lock */
+ /* Removed... unnecessary. We have our own lock here */
+
+ /* Rec 2/3/4: Take all inbound transactions */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000001c00000000ul);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0x10000000);
+
+ /* Rec 5/6/7: Clear pending non-fatal errors */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000005000000000ul);
+ val32 = in_be32(p->regs + PHB_CONFIG_DATA);
+ out_be32(p->regs + PHB_CONFIG_DATA, (val32 & 0xe0700000) | 0x0f000f00);
+
+ /* Rec 8/9/10: Clear pending fatal errors for AER */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000010400000000ul);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 11/12/13: Clear pending non-fatal errors for AER */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000011000000000ul);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 22/23/24: Clear root port errors */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000013000000000ul);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0xffffffff);
+
+ /* Rec 25/26/27: Enable IO and MMIO bar */
+ out_be64(p->regs + PHB_CONFIG_ADDRESS, 0x8000004000000000ul);
+ out_be32(p->regs + PHB_CONFIG_DATA, 0x470100f8);
+
+ /* Rec 28: Release the PCI config lock */
+ /* Removed... unnecessary. We have our own lock here */
+
+ /* Rec 29...34: Clear UTL errors */
+ val64 = in_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, val64);
+ val64 = in_be64(p->regs + UTL_PCIE_PORT_STATUS);
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, val64);
+ val64 = in_be64(p->regs + UTL_RC_STATUS);
+ out_be64(p->regs + UTL_RC_STATUS, val64);
+
+ /* Rec 39...66: Clear PHB error trap */
+ val64 = in_be64(p->regs + PHB_ERR_STATUS);
+ out_be64(p->regs + PHB_ERR_STATUS, val64);
+ out_be64(p->regs + PHB_ERR1_STATUS, 0x0ul);
+ out_be64(p->regs + PHB_ERR_LOG_0, 0x0ul);
+ out_be64(p->regs + PHB_ERR_LOG_1, 0x0ul);
+
+ val64 = in_be64(p->regs + PHB_OUT_ERR_STATUS);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS, val64);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0x0ul);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0x0ul);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0x0ul);
+
+ val64 = in_be64(p->regs + PHB_INA_ERR_STATUS);
+ out_be64(p->regs + PHB_INA_ERR_STATUS, val64);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS, 0x0ul);
+ out_be64(p->regs + PHB_INA_ERR_LOG_0, 0x0ul);
+ out_be64(p->regs + PHB_INA_ERR_LOG_1, 0x0ul);
+
+ val64 = in_be64(p->regs + PHB_INB_ERR_STATUS);
+ out_be64(p->regs + PHB_INB_ERR_STATUS, val64);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0ul);
+ out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0ul);
+ out_be64(p->regs + PHB_INB_ERR_LOG_1, 0x0ul);
+
+ /* Rec 67/68: Clear FIR/WOF */
+ out_be64(p->regs + PHB_LEM_FIR_AND_MASK, ~fir);
+ out_be64(p->regs + PHB_LEM_WOF, 0x0ul);
+}
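+
+/*
+ * The 0x8000...ul constants above decode, per the PHB_CA_* layout
+ * used by the config accessors earlier, as PHB_CA_ENABLE with the
+ * config register offset in bits 32 and up: offsets 0x1c, 0x50,
+ * 0x104, 0x110, 0x130 and 0x40 respectively, all for bdfn 0 (the
+ * root complex). The offsets themselves are taken on trust from
+ * the "Rec" recovery procedure.
+ */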
+
+static void phb3_read_phb_status(struct phb3 *p,
+ struct OpalIoPhb3ErrorData *stat)
+{
+ bool locked;
+ uint16_t val;
+ uint64_t *pPEST;
+ uint64_t val64 = 0;
+ uint32_t i;
+
+ memset(stat, 0, sizeof(struct OpalIoPhb3ErrorData));
+
+ /* Error data common part */
+ stat->common.version = OPAL_PHB_ERROR_DATA_VERSION_1;
+ stat->common.ioType = OPAL_PHB_ERROR_DATA_TYPE_PHB3;
+ stat->common.len = sizeof(struct OpalIoPhb3ErrorData);
+
+	/*
+	 * We read some registers using config space through AIB.
+	 *
+	 * Where possible we use ASB for the other registers, so
+	 * that we can still reach them through a fence if one is
+	 * present.
+	 */
+
+ /* Use ASB to access PCICFG if the PHB has been fenced */
+ locked = lock_recursive(&p->lock);
+ p->flags |= PHB3_CFG_USE_ASB;
+
+ /* Grab RC bridge control, make it 32-bit */
+ phb3_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &val);
+ stat->brdgCtl = val;
+
+ /* Grab UTL status registers */
+ stat->portStatusReg = hi32(phb3_read_reg_asb(p, UTL_PCIE_PORT_STATUS));
+ stat->rootCmplxStatus = hi32(phb3_read_reg_asb(p, UTL_RC_STATUS));
+ stat->busAgentStatus = hi32(phb3_read_reg_asb(p, UTL_SYS_BUS_AGENT_STATUS));
+
+ /*
+ * Grab various RC PCIe capability registers. All device, slot
+ * and link status are 16-bit, so we grab the pair control+status
+ * for each of them
+ */
+ phb3_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_DEVCTL,
+ &stat->deviceStatus);
+ phb3_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_SLOTCTL,
+ &stat->slotStatus);
+ phb3_pcicfg_read32(&p->phb, 0, p->ecap + PCICAP_EXP_LCTL,
+ &stat->linkStatus);
+
+	/*
+	 * I assume those are the standard config space header regs:
+	 * cmd & status together make 32 bits. Secondary status is
+	 * 16-bit, so I'll clear the top half on that one.
+	 */
+ phb3_pcicfg_read32(&p->phb, 0, PCI_CFG_CMD, &stat->devCmdStatus);
+ phb3_pcicfg_read16(&p->phb, 0, PCI_CFG_SECONDARY_STATUS, &val);
+ stat->devSecStatus = val;
+
+ /* Grab a bunch of AER regs */
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_RERR_STA,
+ &stat->rootErrorStatus);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_UE_STATUS,
+ &stat->uncorrErrorStatus);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_STATUS,
+ &stat->corrErrorStatus);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG0,
+ &stat->tlpHdr1);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG1,
+ &stat->tlpHdr2);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG2,
+ &stat->tlpHdr3);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_HDR_LOG3,
+ &stat->tlpHdr4);
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_SRCID,
+ &stat->sourceId);
+
+ /* Restore to AIB */
+ p->flags &= ~PHB3_CFG_USE_ASB;
+ if (locked) {
+ unlock(&p->lock);
+ pci_put_phb(&p->phb);
+ }
+
+ /* PEC NFIR */
+ xscom_read(p->chip_id, p->pe_xscom + 0x0, &stat->nFir);
+ xscom_read(p->chip_id, p->pe_xscom + 0x3, &stat->nFirMask);
+ xscom_read(p->chip_id, p->pe_xscom + 0x8, &stat->nFirWOF);
+
+ /* PHB3 inbound and outbound error Regs */
+ stat->phbPlssr = phb3_read_reg_asb(p, PHB_CPU_LOADSTORE_STATUS);
+ stat->phbCsr = phb3_read_reg_asb(p, PHB_DMA_CHAN_STATUS);
+ stat->lemFir = phb3_read_reg_asb(p, PHB_LEM_FIR_ACCUM);
+ stat->lemErrorMask = phb3_read_reg_asb(p, PHB_LEM_ERROR_MASK);
+ stat->lemWOF = phb3_read_reg_asb(p, PHB_LEM_WOF);
+ stat->phbErrorStatus = phb3_read_reg_asb(p, PHB_ERR_STATUS);
+ stat->phbFirstErrorStatus = phb3_read_reg_asb(p, PHB_ERR1_STATUS);
+ stat->phbErrorLog0 = phb3_read_reg_asb(p, PHB_ERR_LOG_0);
+ stat->phbErrorLog1 = phb3_read_reg_asb(p, PHB_ERR_LOG_1);
+ stat->mmioErrorStatus = phb3_read_reg_asb(p, PHB_OUT_ERR_STATUS);
+ stat->mmioFirstErrorStatus = phb3_read_reg_asb(p, PHB_OUT_ERR1_STATUS);
+ stat->mmioErrorLog0 = phb3_read_reg_asb(p, PHB_OUT_ERR_LOG_0);
+ stat->mmioErrorLog1 = phb3_read_reg_asb(p, PHB_OUT_ERR_LOG_1);
+ stat->dma0ErrorStatus = phb3_read_reg_asb(p, PHB_INA_ERR_STATUS);
+ stat->dma0FirstErrorStatus = phb3_read_reg_asb(p, PHB_INA_ERR1_STATUS);
+ stat->dma0ErrorLog0 = phb3_read_reg_asb(p, PHB_INA_ERR_LOG_0);
+ stat->dma0ErrorLog1 = phb3_read_reg_asb(p, PHB_INA_ERR_LOG_1);
+ stat->dma1ErrorStatus = phb3_read_reg_asb(p, PHB_INB_ERR_STATUS);
+ stat->dma1FirstErrorStatus = phb3_read_reg_asb(p, PHB_INB_ERR1_STATUS);
+ stat->dma1ErrorLog0 = phb3_read_reg_asb(p, PHB_INB_ERR_LOG_0);
+ stat->dma1ErrorLog1 = phb3_read_reg_asb(p, PHB_INB_ERR_LOG_1);
+
+	/*
+	 * Grab the PESTA & B content. The error bit (bit#0) should
+	 * be fetched from IODA and the remaining content from the
+	 * memory-resident tables.
+	 */
+ pPEST = (uint64_t *)p->tbl_pest;
+ val64 = PHB_IODA_AD_AUTOINC;
+ val64 = SETFIELD(PHB_IODA_AD_TSEL, val64, IODA2_TBL_PESTA);
+ phb3_write_reg_asb(p, PHB_IODA_ADDR, val64);
+ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+ stat->pestA[i] = phb3_read_reg_asb(p, PHB_IODA_DATA0);
+ stat->pestA[i] |= pPEST[2 * i];
+ }
+
+ val64 = PHB_IODA_AD_AUTOINC;
+ val64 = SETFIELD(PHB_IODA_AD_TSEL, val64, IODA2_TBL_PESTB);
+ phb3_write_reg_asb(p, PHB_IODA_ADDR, val64);
+ for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+ stat->pestB[i] = phb3_read_reg_asb(p, PHB_IODA_DATA0);
+ stat->pestB[i] |= pPEST[2 * i + 1];
+ }
+}
+
+static int64_t phb3_msi_get_xive(void *data,
+ uint32_t isn,
+ uint16_t *server,
+ uint8_t *prio)
+{
+ struct phb3 *p = data;
+ uint32_t chip, index, irq;
+ uint64_t ive;
+
+ chip = P8_IRQ_TO_CHIP(isn);
+ index = P8_IRQ_TO_PHB(isn);
+ irq = PHB3_IRQ_NUM(isn);
+
+ if (chip != p->chip_id ||
+ index != p->index ||
+ irq > PHB3_MSI_IRQ_MAX)
+ return OPAL_PARAMETER;
+
+	/*
+	 * Each IVE has 16 bytes in the cache. Note that the kernel
+	 * should strip the link bits from the server field.
+	 */
+ ive = p->ive_cache[irq];
+ *server = GETFIELD(IODA2_IVT_SERVER, ive);
+ *prio = GETFIELD(IODA2_IVT_PRIORITY, ive);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_msi_set_xive(void *data,
+ uint32_t isn,
+ uint16_t server,
+ uint8_t prio)
+{
+ struct phb3 *p = data;
+ uint32_t chip, index;
+ uint64_t *cache, ive_num, data64, m_server, m_prio;
+ uint32_t *ive;
+
+ chip = P8_IRQ_TO_CHIP(isn);
+ index = P8_IRQ_TO_PHB(isn);
+ ive_num = PHB3_IRQ_NUM(isn);
+
+ if (p->state == PHB3_STATE_BROKEN || !p->tbl_rtt)
+ return OPAL_HARDWARE;
+ if (chip != p->chip_id ||
+ index != p->index ||
+ ive_num > PHB3_MSI_IRQ_MAX)
+ return OPAL_PARAMETER;
+
+	/*
+	 * We need to strip the link bits from the server number. As
+	 * Milton explained, the server number is laid out as
+	 * node/chip/core/thread/link = 2/3/4/3/2 bits, with the
+	 * leftover bits unused.
+	 *
+	 * Note: the server number passed in already includes the
+	 * link bits.
+	 */
+ m_server = server;
+ m_prio = prio;
+
+ cache = &p->ive_cache[ive_num];
+ *cache = SETFIELD(IODA2_IVT_SERVER, *cache, m_server);
+ *cache = SETFIELD(IODA2_IVT_PRIORITY, *cache, m_prio);
+
+	/*
+	 * Update the IVT and IVC. We need to use the IVC update
+	 * register for that. Each IVE in the table has 128 bytes.
+	 */
+ ive = (uint32_t *)(p->tbl_ivt + ive_num * IVT_TABLE_STRIDE * 8);
+ data64 = PHB_IVC_UPDATE_ENABLE_SERVER | PHB_IVC_UPDATE_ENABLE_PRI;
+ data64 = SETFIELD(PHB_IVC_UPDATE_SID, data64, ive_num);
+ data64 = SETFIELD(PHB_IVC_UPDATE_SERVER, data64, m_server);
+ data64 = SETFIELD(PHB_IVC_UPDATE_PRI, data64, m_prio);
+
+ /*
+ * We don't use SETFIELD because we are doing a 32-bit access
+ * in order to avoid touching the P and Q bits
+ */
+ *ive = (m_server << 8) | m_prio;
+ out_be64(p->regs + PHB_IVC_UPDATE, data64);
+
+ /*
+ * Handle Q bit if we're going to enable the interrupt.
+ * The OS should make sure the interrupt handler has
+ * been installed already.
+ */
+ if (prio != 0xff)
+ phb3_pci_msi_check_q(p, ive_num);
+
+ return OPAL_SUCCESS;
+}
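+
+/*
+ * Example of the 32-bit store above: server = 0x31, prio = 0x05
+ * gives *ive = 0x3105. Because only the first word of the IVE is
+ * written, the P/generation byte at offset 4 and the Q byte at
+ * offset 5 are left untouched.
+ */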
+
+static int64_t phb3_lsi_get_xive(void *data,
+ uint32_t isn,
+ uint16_t *server,
+ uint8_t *prio)
+{
+ struct phb3 *p = data;
+ uint32_t chip, index, irq;
+ uint64_t lxive;
+
+ chip = P8_IRQ_TO_CHIP(isn);
+ index = P8_IRQ_TO_PHB(isn);
+ irq = PHB3_IRQ_NUM(isn);
+
+ if (chip != p->chip_id ||
+ index != p->index ||
+ irq < PHB3_LSI_IRQ_MIN ||
+ irq > PHB3_LSI_IRQ_MAX)
+ return OPAL_PARAMETER;
+
+ lxive = p->lxive_cache[irq - PHB3_LSI_IRQ_MIN];
+ *server = GETFIELD(IODA2_LXIVT_SERVER, lxive);
+ *prio = GETFIELD(IODA2_LXIVT_PRIORITY, lxive);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_lsi_set_xive(void *data,
+ uint32_t isn,
+ uint16_t server,
+ uint8_t prio)
+{
+ struct phb3 *p = data;
+ uint32_t chip, index, irq, entry;
+ uint64_t lxive;
+
+ chip = P8_IRQ_TO_CHIP(isn);
+ index = P8_IRQ_TO_PHB(isn);
+ irq = PHB3_IRQ_NUM(isn);
+
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ if (chip != p->chip_id ||
+ index != p->index ||
+ irq < PHB3_LSI_IRQ_MIN ||
+ irq > PHB3_LSI_IRQ_MAX)
+ return OPAL_PARAMETER;
+
+ lxive = SETFIELD(IODA2_LXIVT_SERVER, 0ul, server);
+ lxive = SETFIELD(IODA2_LXIVT_PRIORITY, lxive, prio);
+
+	/*
+	 * We cache the arguments because we have to mangle them in
+	 * order to hijack 3 bits of priority to extend the server
+	 * number.
+	 */
+ entry = irq - PHB3_LSI_IRQ_MIN;
+ p->lxive_cache[entry] = lxive;
+
+ /* We use HRT entry 0 always for now */
+ phb3_ioda_sel(p, IODA2_TBL_LXIVT, entry, false);
+ lxive = in_be64(p->regs + PHB_IODA_DATA0);
+ lxive = SETFIELD(IODA2_LXIVT_SERVER, lxive, server);
+ lxive = SETFIELD(IODA2_LXIVT_PRIORITY, lxive, prio);
+ out_be64(p->regs + PHB_IODA_DATA0, lxive);
+
+ return OPAL_SUCCESS;
+}
+
+static void phb3_err_interrupt(void *data, uint32_t isn)
+{
+ struct phb3 *p = data;
+
+ PHBDBG(p, "Got interrupt 0x%08x\n", isn);
+
+ /* Update pending event */
+ opal_update_pending_evt(OPAL_EVENT_PCI_ERROR,
+ OPAL_EVENT_PCI_ERROR);
+
+ /* If the PHB is broken, go away */
+ if (p->state == PHB3_STATE_BROKEN)
+ return;
+
+	/*
+	 * Mark the PHB as having a pending error so that the OS
+	 * can handle it at a later point.
+	 */
+ phb3_set_err_pending(p, true);
+}
+
+/* MSIs (OS owned) */
+static const struct irq_source_ops phb3_msi_irq_ops = {
+ .get_xive = phb3_msi_get_xive,
+ .set_xive = phb3_msi_set_xive,
+};
+
+/* LSIs (OS owned) */
+static const struct irq_source_ops phb3_lsi_irq_ops = {
+ .get_xive = phb3_lsi_get_xive,
+ .set_xive = phb3_lsi_set_xive,
+};
+
+/* Error LSIs (skiboot owned) */
+static const struct irq_source_ops phb3_err_lsi_irq_ops = {
+ .get_xive = phb3_lsi_get_xive,
+ .set_xive = phb3_lsi_set_xive,
+ .interrupt = phb3_err_interrupt,
+};
+
+static int64_t phb3_set_pe(struct phb *phb,
+ uint64_t pe_num,
+ uint64_t bdfn,
+ uint8_t bcompare,
+ uint8_t dcompare,
+ uint8_t fcompare,
+ uint8_t action)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t mask, val, tmp, idx;
+ int32_t all = 0;
+ uint16_t *rte;
+
+ /* Sanity check */
+ if (!p->tbl_rtt)
+ return OPAL_HARDWARE;
+ if (action != OPAL_MAP_PE && action != OPAL_UNMAP_PE)
+ return OPAL_PARAMETER;
+ if (pe_num >= PHB3_MAX_PE_NUM || bdfn > 0xffff ||
+ bcompare > OpalPciBusAll ||
+ dcompare > OPAL_COMPARE_RID_DEVICE_NUMBER ||
+ fcompare > OPAL_COMPARE_RID_FUNCTION_NUMBER)
+ return OPAL_PARAMETER;
+
+ /* Figure out the RID range */
+ if (bcompare == OpalPciBusAny) {
+ mask = 0x0;
+ val = 0x0;
+ all = 0x1;
+ } else {
+ tmp = ((0x1 << (bcompare + 1)) - 1) << (15 - bcompare);
+ mask = tmp;
+ val = bdfn & tmp;
+ }
+
+ if (dcompare == OPAL_IGNORE_RID_DEVICE_NUMBER)
+ all = (all << 1) | 0x1;
+ else {
+ mask |= 0xf8;
+ val |= (bdfn & 0xf8);
+ }
+
+ if (fcompare == OPAL_IGNORE_RID_FUNCTION_NUMBER)
+ all = (all << 1) | 0x1;
+ else {
+ mask |= 0x7;
+ val |= (bdfn & 0x7);
+ }
+
+ /* Map or unmap the RTT range */
+ if (all == 0x7) {
+ if (action == OPAL_MAP_PE) {
+ for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++)
+ p->rte_cache[idx] = pe_num;
+ } else {
+ memset(p->rte_cache, 0xff, RTT_TABLE_SIZE);
+ }
+ memcpy((void *)p->tbl_rtt, p->rte_cache, RTT_TABLE_SIZE);
+ out_be64(p->regs + PHB_RTC_INVALIDATE,
+ PHB_RTC_INVALIDATE_ALL);
+ } else {
+ rte = (uint16_t *)p->tbl_rtt;
+ for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++, rte++) {
+ if ((idx & mask) != val)
+ continue;
+ p->rte_cache[idx] = (action ? pe_num : 0xffff);
+ *rte = p->rte_cache[idx];
+
+			/*
+			 * We might not need to invalidate the RTC one entry
+			 * at a time, since the host kernel is expected to
+			 * update the RTT in batch mode.
+			 */
+ out_be64(p->regs + PHB_RTC_INVALIDATE,
+ SETFIELD(PHB_RTC_INVALIDATE_RID, 0ul, idx));
+ }
+ }
+
+ return OPAL_SUCCESS;
+}
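+
+/*
+ * A worked example of the RID range computation above: bcompare =
+ * OpalPciBusAll (7) gives tmp = ((1 << 8) - 1) << 8 = 0xff00, a
+ * full bus number match, while bcompare = 2 gives 0xe000 (top 3
+ * bus bits only). Exact device and function compares then OR in
+ * 0xf8 and 0x7, e.g. 0xe000 | 0xf8 | 0x7 = 0xe0ff.
+ */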
+
+static int64_t phb3_set_peltv(struct phb *phb,
+ uint32_t parent_pe,
+ uint32_t child_pe,
+ uint8_t state)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint8_t *peltv;
+ uint32_t idx, mask;
+
+ /* Sanity check */
+ if (!p->tbl_peltv)
+ return OPAL_HARDWARE;
+ if (parent_pe >= PHB3_MAX_PE_NUM || child_pe >= PHB3_MAX_PE_NUM)
+ return OPAL_PARAMETER;
+
+ /* Find index for parent PE */
+ idx = parent_pe * (PHB3_MAX_PE_NUM / 8);
+ idx += (child_pe / 8);
+ mask = 0x1 << (7 - (child_pe % 8));
+
+ peltv = (uint8_t *)p->tbl_peltv;
+ peltv += idx;
+ if (state) {
+ *peltv |= mask;
+ p->peltv_cache[idx] |= mask;
+ } else {
+ *peltv &= ~mask;
+ p->peltv_cache[idx] &= ~mask;
+ }
+
+ return OPAL_SUCCESS;
+}
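+
+/*
+ * Illustrative example of the indexing above, assuming
+ * PHB3_MAX_PE_NUM is 256: each parent PE owns a 32-byte row, so
+ * parent_pe = 5, child_pe = 9 gives idx = 5 * 32 + 1 = 161 and
+ * mask = 1 << (7 - 1) = 0x40.
+ */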
+
+static int64_t phb3_link_state(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ uint16_t lstat;
+ int64_t rc;
+
+ /* XXX Test for PHB in error state ? */
+
+	/* If the DLP doesn't report the link active, it's down */
+	if (!(reg & PHB_PCIE_DLP_TC_DL_LINKACT))
+		return OPAL_SHPC_LINK_DOWN;
+
+	/* Link is up, let's find the actual width */
+ rc = phb3_pcicfg_read16(&p->phb, 0, p->ecap + PCICAP_EXP_LSTAT,
+ &lstat);
+ if (rc < 0) {
+ /* Shouldn't happen */
+ PHBERR(p, "Failed to read link status\n");
+ return OPAL_HARDWARE;
+ }
+ if (!(lstat & PCICAP_EXP_LSTAT_DLLL_ACT))
+ return OPAL_SHPC_LINK_DOWN;
+
+ return GETFIELD(PCICAP_EXP_LSTAT_WIDTH, lstat);
+}
+
+static int64_t phb3_power_state(struct phb __unused *phb)
+{
+ /* XXX Test for PHB in error state ? */
+
+ /* XXX TODO - External power control ? */
+
+ return OPAL_SHPC_POWER_ON;
+}
+
+static int64_t phb3_slot_power_off(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+ if (p->state != PHB3_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* XXX TODO - External power control ? */
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_slot_power_on(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+ if (p->state != PHB3_STATE_FUNCTIONAL)
+ return OPAL_BUSY;
+
+ /* XXX TODO - External power control ? */
+
+ return OPAL_SUCCESS;
+}
+
+static void phb3_setup_for_link_down(struct phb3 *p)
+{
+ uint32_t reg32;
+
+ /* Mark link down */
+ p->has_link = false;
+
+ /* Mask PCIE port interrupts */
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad42800000000000);
+
+ /* Mask AER receiver error */
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_MASK, &reg32);
+ reg32 |= PCIECAP_AER_CE_RECVR_ERR;
+ phb3_pcicfg_write32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_MASK, reg32);
+}
+
+static void phb3_setup_for_link_up(struct phb3 *p)
+{
+ uint32_t reg32;
+
+ /* Clear AER receiver error status */
+ phb3_pcicfg_write32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_STATUS,
+ PCIECAP_AER_CE_RECVR_ERR);
+ /* Unmask receiver error status in AER */
+ phb3_pcicfg_read32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_MASK, &reg32);
+ reg32 &= ~PCIECAP_AER_CE_RECVR_ERR;
+ phb3_pcicfg_write32(&p->phb, 0, p->aercap + PCIECAP_AER_CE_MASK, reg32);
+
+	/* Clear spurious errors and enable PCIE port interrupts */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffdfffffffffffff);
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad5a800000000000);
+
+	/* Mark link up */
+ p->has_link = true;
+
+ /* Don't block PCI-CFG */
+ p->flags &= ~PHB3_CFG_BLOCKED;
+}
+
+static int64_t phb3_sm_link_poll(struct phb3 *p)
+{
+ uint64_t reg;
+
+ /* This is the state machine to wait for the link to come
+ * up. Currently we just wait until we timeout, eventually
+ * we want to add retries and fallback to Gen1.
+ */
+ switch(p->state) {
+ case PHB3_STATE_WAIT_LINK_ELECTRICAL:
+ /* Wait for the link electrical connection to be
+ * established (shorter timeout). This allows us to
+	 * work around spurious presence detect on some machines
+ * without waiting 10s each time
+ *
+ * Note: We *also* check for the full link up bit here
+ * because simics doesn't seem to implement the electrical
+ * link bit at all
+ */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & (PHB_PCIE_DLP_INBAND_PRESENCE |
+ PHB_PCIE_DLP_TC_DL_LINKACT)) {
+ PHBDBG(p, "Electrical link detected...\n");
+ p->state = PHB3_STATE_WAIT_LINK;
+ p->retries = PHB3_LINK_WAIT_RETRIES;
+ } else if (p->retries-- == 0) {
+ PHBDBG(p, "Timeout waiting for electrical link\n");
+ PHBDBG(p, "DLP train control: 0x%016llx\n", reg);
+ /* No link, we still mark the PHB as functional */
+ p->state = PHB3_STATE_FUNCTIONAL;
+ return OPAL_SUCCESS;
+ }
+ return phb3_set_sm_timeout(p, msecs_to_tb(100));
+ case PHB3_STATE_WAIT_LINK:
+ /* XXX I used the PHB_PCIE_LINK_MANAGEMENT register here but
+ * simics doesn't seem to give me anything, so I've switched
+ * to PCIE_DLP_TRAIN_CTL which appears more reliable
+ */
+ reg = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (reg & PHB_PCIE_DLP_TC_DL_LINKACT) {
+ /* Setup PHB for link up */
+ phb3_setup_for_link_up(p);
+ PHBDBG(p, "Link is up!\n");
+ p->state = PHB3_STATE_FUNCTIONAL;
+ return OPAL_SUCCESS;
+ }
+ if (p->retries-- == 0) {
+ PHBDBG(p, "Timeout waiting for link up\n");
+ PHBDBG(p, "DLP train control: 0x%016llx\n", reg);
+ /* No link, we still mark the PHB as functional */
+ p->state = PHB3_STATE_FUNCTIONAL;
+ return OPAL_SUCCESS;
+ }
+ return phb3_set_sm_timeout(p, msecs_to_tb(100));
+ default:
+ /* How did we get here ? */
+ assert(false);
+ }
+ return OPAL_HARDWARE;
+}
+
+static int64_t phb3_start_link_poll(struct phb3 *p)
+{
+	/*
+	 * Wait up to 10s for the link. However, we give up after
+	 * only a second if the electrical connection isn't
+	 * established according to the DLP link control register.
+	 */
+ p->retries = PHB3_LINK_ELECTRICAL_RETRIES;
+ p->state = PHB3_STATE_WAIT_LINK_ELECTRICAL;
+ return phb3_set_sm_timeout(p, msecs_to_tb(100));
+}
+
+static int64_t phb3_sm_hot_reset(struct phb3 *p)
+{
+ uint16_t brctl;
+
+ switch (p->state) {
+ case PHB3_STATE_FUNCTIONAL:
+		/* There's nothing to do if no device is present */
+ if (phb3_presence_detect(&p->phb) != OPAL_SHPC_DEV_PRESENT) {
+ PHBDBG(p, "Slot hreset: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Prepare for link going down */
+ phb3_setup_for_link_down(p);
+
+ /* Turn on hot reset */
+ phb3_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl |= PCI_CFG_BRCTL_SECONDARY_RESET;
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ PHBDBG(p, "Slot hreset: assert reset\n");
+
+ p->state = PHB3_STATE_HRESET_DELAY;
+ return phb3_set_sm_timeout(p, secs_to_tb(1));
+ case PHB3_STATE_HRESET_DELAY:
+ /* Turn off hot reset */
+ phb3_pcicfg_read16(&p->phb, 0, PCI_CFG_BRCTL, &brctl);
+ brctl &= ~PCI_CFG_BRCTL_SECONDARY_RESET;
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, brctl);
+ PHBDBG(p, "Slot hreset: deassert reset\n");
+
+ /*
+ * Due to some oddball adapters bouncing the link
+ * training a couple of times, we wait for a full second
+ * before we start checking the link status, otherwise
+		 * we can get a spurious link down interrupt which
+ * causes us to EEH immediately.
+ */
+ p->state = PHB3_STATE_HRESET_DELAY2;
+ return phb3_set_sm_timeout(p, secs_to_tb(1));
+ case PHB3_STATE_HRESET_DELAY2:
+ return phb3_start_link_poll(p);
+ default:
+ PHBDBG(p, "Slot hreset: wrong state %d\n", p->state);
+ break;
+ }
+
+ p->state = PHB3_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t phb3_hot_reset(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ if (p->state != PHB3_STATE_FUNCTIONAL) {
+ PHBDBG(p, "phb3_hot_reset: wrong state %d\n",
+ p->state);
+ return OPAL_HARDWARE;
+ }
+
+ p->flags |= PHB3_CFG_BLOCKED;
+ return phb3_sm_hot_reset(p);
+}
+
+static int64_t phb3_sm_fundamental_reset(struct phb3 *p)
+{
+ uint64_t reg;
+
+	/*
+	 * Check if there's something connected. We do that here
+	 * instead of the switch case below because we want to do
+	 * it before we test skip_perst.
+	 */
+ if (p->state == PHB3_STATE_FUNCTIONAL &&
+ phb3_presence_detect(&p->phb) != OPAL_SHPC_DEV_PRESENT) {
+ PHBDBG(p, "Slot freset: no device\n");
+ return OPAL_CLOSED;
+ }
+
+ /* Handle boot time skipping of reset */
+ if (p->skip_perst && p->state == PHB3_STATE_FUNCTIONAL) {
+ PHBINF(p, "Cold boot, skipping PERST assertion\n");
+ p->state = PHB3_STATE_FRESET_ASSERT_DELAY;
+ /* PERST skipping happens only once */
+ p->skip_perst = false;
+ }
+
+ switch(p->state) {
+ case PHB3_STATE_FUNCTIONAL:
+ PHBINF(p, "Performing PERST...\n");
+
+ /* Prepare for link going down */
+ phb3_setup_for_link_down(p);
+
+ /* Assert PERST */
+ reg = in_be64(p->regs + PHB_RESET);
+ reg &= ~0x2000000000000000ul;
+ out_be64(p->regs + PHB_RESET, reg);
+ PHBDBG(p, "Slot freset: Asserting PERST\n");
+
+ /* XXX Check delay for PERST... doing 1s for now */
+ p->state = PHB3_STATE_FRESET_ASSERT_DELAY;
+ return phb3_set_sm_timeout(p, secs_to_tb(1));
+
+ case PHB3_STATE_FRESET_ASSERT_DELAY:
+ /* Deassert PERST */
+ reg = in_be64(p->regs + PHB_RESET);
+ reg |= 0x2000000000000000ul;
+ out_be64(p->regs + PHB_RESET, reg);
+ PHBDBG(p, "Slot freset: Deasserting PERST\n");
+
+ /* Wait 200ms before polling link */
+ p->state = PHB3_STATE_FRESET_DEASSERT_DELAY;
+ return phb3_set_sm_timeout(p, msecs_to_tb(200));
+
+ case PHB3_STATE_FRESET_DEASSERT_DELAY:
+ /* Switch to generic link poll state machine */
+ return phb3_start_link_poll(p);
+
+ default:
+ PHBDBG(p, "Slot freset: wrong state %d\n",
+ p->state);
+ break;
+ }
+
+ p->state = PHB3_STATE_FUNCTIONAL;
+ return OPAL_HARDWARE;
+}
+
+static int64_t phb3_fundamental_reset(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ if (p->state != PHB3_STATE_FUNCTIONAL) {
+ PHBDBG(p, "phb3_fundamental_reset: wrong state %d\n", p->state);
+ return OPAL_HARDWARE;
+ }
+
+ p->flags |= PHB3_CFG_BLOCKED;
+ return phb3_sm_fundamental_reset(p);
+}
+
+/*
+ * The OS is expected to do a fundamental reset after a complete
+ * reset, to make sure the PHB can be recovered from the fenced
+ * state. However, the OS needn't do that explicitly, since a
+ * fundamental reset is done automatically when powering the
+ * PHB back on.
+ *
+ * Usually we would need to power the PHB off and on, which
+ * includes the fundamental reset. However, we don't know how to
+ * control the power stuff yet, so skip that and do the
+ * fundamental reset directly after reinitializing the hardware.
+ */
+static int64_t phb3_sm_complete_reset(struct phb3 *p)
+{
+ uint64_t cqsts, val;
+
+ switch (p->state) {
+ case PHB3_STATE_FENCED:
+ case PHB3_STATE_FUNCTIONAL:
+		/*
+		 * The user might be doing error injection through the PBCQ
+		 * Error Inject Control Register. Without clearing that, we
+		 * will get recursive errors during recovery and it will
+		 * eventually fail.
+		 */
+ xscom_write(p->chip_id, p->pe_xscom + 0xa, 0x0ul);
+
+		/*
+		 * We might have escalated a frozen state on a non-existent
+		 * PE to a fenced PHB. In that case the PHB isn't fenced at
+		 * the hardware level and it's not safe to do an ETU reset,
+		 * so we have to force-fence the PHB prior to the ETU reset.
+		 */
+ if (!phb3_fenced(p))
+ xscom_write(p->chip_id, p->pe_xscom + 0x2, 0x000000f000000000ull);
+
+ /* Clear errors in NFIR and raise ETU reset */
+ xscom_read(p->chip_id, p->pe_xscom + 0x0, &p->nfir_cache);
+
+ xscom_read(p->chip_id, p->spci_xscom + 1, &val);/* HW275117 */
+ xscom_write(p->chip_id, p->pci_xscom + 0xa,
+ 0x8000000000000000);
+ p->state = PHB3_STATE_CRESET_WAIT_CQ;
+ p->retries = 500;
+ return phb3_set_sm_timeout(p, msecs_to_tb(10));
+ case PHB3_STATE_CRESET_WAIT_CQ:
+ xscom_read(p->chip_id, p->pe_xscom + 0x1c, &val);
+ xscom_read(p->chip_id, p->pe_xscom + 0x1d, &val);
+ xscom_read(p->chip_id, p->pe_xscom + 0x1e, &val);
+ xscom_read(p->chip_id, p->pe_xscom + 0xf, &cqsts);
+ if (!(cqsts & 0xC000000000000000)) {
+ xscom_write(p->chip_id, p->pe_xscom + 0x1, ~p->nfir_cache);
+
+ p->state = PHB3_STATE_CRESET_REINIT;
+ return phb3_set_sm_timeout(p, msecs_to_tb(100));
+ }
+
+ if (p->retries-- == 0) {
+ PHBERR(p, "Timeout waiting for pending transaction\n");
+ goto error;
+ }
+ return phb3_set_sm_timeout(p, msecs_to_tb(10));
+ case PHB3_STATE_CRESET_REINIT:
+ p->flags &= ~PHB3_AIB_FENCED;
+ phb3_init_hw(p);
+
+ p->state = PHB3_STATE_CRESET_FRESET;
+ return phb3_set_sm_timeout(p, msecs_to_tb(100));
+ case PHB3_STATE_CRESET_FRESET:
+ p->state = PHB3_STATE_FUNCTIONAL;
+ p->flags |= PHB3_CFG_BLOCKED;
+ return phb3_sm_fundamental_reset(p);
+ default:
+ assert(false);
+ }
+
+ /* Mark the PHB as dead and expect it to be removed */
+error:
+ p->state = PHB3_STATE_BROKEN;
+ return OPAL_PARAMETER;
+}
+
+static int64_t phb3_complete_reset(struct phb *phb, uint8_t assert)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+
+ if ((assert == OPAL_ASSERT_RESET &&
+ p->state != PHB3_STATE_FUNCTIONAL &&
+ p->state != PHB3_STATE_FENCED) ||
+ (assert == OPAL_DEASSERT_RESET &&
+ p->state != PHB3_STATE_FUNCTIONAL)) {
+ PHBERR(p, "phb3_creset: wrong state %d\n",
+ p->state);
+ return OPAL_HARDWARE;
+ }
+
+ /* Block PCI-CFG access */
+ p->flags |= PHB3_CFG_BLOCKED;
+
+ if (assert == OPAL_ASSERT_RESET) {
+ PHBINF(p, "Starting PHB reset sequence\n");
+ return phb3_sm_complete_reset(p);
+ } else {
+ return phb3_sm_hot_reset(p);
+ }
+}
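+
+/*
+ * Illustrative caller sequence for a complete reset (a sketch, not
+ * part of this driver; the real caller is the OS via OPAL). poll()
+ * returns a positive timebase delay to wait before polling again,
+ * 0 on completion, or a negative OPAL error:
+ *
+ *   phb->ops->complete_reset(phb, OPAL_ASSERT_RESET);
+ *   while ((rc = phb->ops->poll(phb)) > 0)
+ *           time_wait(rc);
+ *   phb->ops->complete_reset(phb, OPAL_DEASSERT_RESET);
+ */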
+
+static int64_t phb3_poll(struct phb *phb)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t now = mftb();
+
+ if (p->state == PHB3_STATE_FUNCTIONAL)
+ return OPAL_SUCCESS;
+
+ /* Check timer */
+ if (p->delay_tgt_tb &&
+ tb_compare(now, p->delay_tgt_tb) == TB_ABEFOREB)
+ return p->delay_tgt_tb - now;
+
+ /* Expired (or not armed), clear it */
+ p->delay_tgt_tb = 0;
+
+ /* Dispatch to the right state machine */
+ switch(p->state) {
+ case PHB3_STATE_HRESET_DELAY:
+ case PHB3_STATE_HRESET_DELAY2:
+ return phb3_sm_hot_reset(p);
+ case PHB3_STATE_FRESET_ASSERT_DELAY:
+ case PHB3_STATE_FRESET_DEASSERT_DELAY:
+ return phb3_sm_fundamental_reset(p);
+ case PHB3_STATE_CRESET_WAIT_CQ:
+ case PHB3_STATE_CRESET_REINIT:
+ case PHB3_STATE_CRESET_FRESET:
+ return phb3_sm_complete_reset(p);
+ case PHB3_STATE_WAIT_LINK_ELECTRICAL:
+ case PHB3_STATE_WAIT_LINK:
+ return phb3_sm_link_poll(p);
+ default:
+ PHBDBG(p, "phb3_poll: wrong state %d\n", p->state);
+ break;
+ }
+
+ /* Unknown state, could be a HW error */
+ return OPAL_HARDWARE;
+}
+
+static int64_t phb3_eeh_freeze_status(struct phb *phb, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint16_t *severity,
+ uint64_t *phb_status)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t peev_bit = PPC_BIT(pe_number & 0x3f);
+ uint64_t peev, pesta, pestb;
+
+ /* Defaults: not frozen */
+ *freeze_state = OPAL_EEH_STOPPED_NOT_FROZEN;
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+
+ /* Check dead */
+ if (p->state == PHB3_STATE_BROKEN) {
+ *freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ if (severity)
+ *severity = OPAL_EEH_SEV_PHB_DEAD;
+ return OPAL_HARDWARE;
+ }
+
+ /* Check fence */
+ if (phb3_fenced(p)) {
+ *freeze_state = OPAL_EEH_STOPPED_MMIO_DMA_FREEZE;
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ if (severity)
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ goto bail;
+ }
+
+ /* Check the PEEV */
+ phb3_ioda_sel(p, IODA2_TBL_PEEV, pe_number / 64, false);
+ peev = in_be64(p->regs + PHB_IODA_DATA0);
+ if (!(peev & peev_bit))
+ return OPAL_SUCCESS;
+
+ /* Indicate that we have an ER pending */
+ phb3_set_err_pending(p, true);
+ if (severity)
+ *severity = OPAL_EEH_SEV_PE_ER;
+
+ /* Read the PESTA & PESTB */
+ phb3_ioda_sel(p, IODA2_TBL_PESTA, pe_number, false);
+ pesta = in_be64(p->regs + PHB_IODA_DATA0);
+ phb3_ioda_sel(p, IODA2_TBL_PESTB, pe_number, false);
+ pestb = in_be64(p->regs + PHB_IODA_DATA0);
+
+ /* Convert them */
+ if (pesta & IODA2_PESTA_MMIO_FROZEN)
+ *freeze_state |= OPAL_EEH_STOPPED_MMIO_FREEZE;
+ if (pestb & IODA2_PESTB_DMA_STOPPED)
+ *freeze_state |= OPAL_EEH_STOPPED_DMA_FREEZE;
+
+bail:
+ if (phb_status)
+ phb3_read_phb_status(p,
+ (struct OpalIoPhb3ErrorData *)phb_status);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_eeh_freeze_clear(struct phb *phb, uint64_t pe_number,
+ uint64_t eeh_action_token)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t err, peev[4];
+ int32_t i;
+ bool frozen_pe = false;
+
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+	/* Check the error summary. If nothing is set, move on to
+	 * clearing the PESTs, which can contain a freeze state left
+	 * over from a previous error or set explicitly by the user
+	 */
+ err = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
+ if (err == 0xffffffffffffffff) {
+ if (phb3_fenced(p)) {
+ PHBERR(p, "eeh_freeze_clear on fenced PHB\n");
+ return OPAL_HARDWARE;
+ }
+ }
+ if (err != 0)
+ phb3_err_ER_clear(p);
+
+	/*
+	 * The PEEV also lives in system memory; accessing it there
+	 * directly would perform better.
+	 */
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO) {
+ phb3_ioda_sel(p, IODA2_TBL_PESTA, pe_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ }
+ if (eeh_action_token & OPAL_EEH_ACTION_CLEAR_FREEZE_DMA) {
+ phb3_ioda_sel(p, IODA2_TBL_PESTB, pe_number, false);
+ out_be64(p->regs + PHB_IODA_DATA0, 0);
+ }
+
+ /* Update ER pending indication */
+ phb3_ioda_sel(p, IODA2_TBL_PEEV, 0, true);
+ for (i = 0; i < ARRAY_SIZE(peev); i++) {
+ peev[i] = in_be64(p->regs + PHB_IODA_DATA0);
+ if (peev[i]) {
+ frozen_pe = true;
+ break;
+ }
+ }
+ if (frozen_pe) {
+ p->err.err_src = PHB3_ERR_SRC_PHB;
+ p->err.err_class = PHB3_ERR_CLASS_ER;
+ p->err.err_bit = -1;
+ phb3_set_err_pending(p, true);
+ } else
+ phb3_set_err_pending(p, false);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t phb3_eeh_next_error(struct phb *phb,
+ uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type,
+ uint16_t *severity)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t fir, peev[4];
+ uint32_t cfg32;
+ int32_t i, j;
+
+	/* If the PHB is broken, there's no point in going further */
+ if (p->state == PHB3_STATE_BROKEN) {
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_DEAD;
+ return OPAL_SUCCESS;
+ }
+
+	/*
+	 * Check if we already have pending errors. If so, gather
+	 * more information about them, checking the PBCQ before
+	 * the PHB.
+	 */
+ if (phb3_err_pending(p) &&
+ !phb3_err_check_pbcq(p) &&
+ !phb3_err_check_lem(p))
+ phb3_set_err_pending(p, false);
+
+ /* Clear result */
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ *first_frozen_pe = (uint64_t)-1;
+
+ /* Check frozen PEs */
+ if (!phb3_err_pending(p)) {
+ phb3_ioda_sel(p, IODA2_TBL_PEEV, 0, true);
+ for (i = 0; i < ARRAY_SIZE(peev); i++) {
+ peev[i] = in_be64(p->regs + PHB_IODA_DATA0);
+ if (peev[i]) {
+ p->err.err_src = PHB3_ERR_SRC_PHB;
+ p->err.err_class = PHB3_ERR_CLASS_ER;
+ p->err.err_bit = -1;
+ phb3_set_err_pending(p, true);
+ break;
+ }
+ }
+ }
+
+ /* Mapping errors */
+ if (phb3_err_pending(p)) {
+		/*
+		 * If the frozen PE is caused by a malformed TLP, we
+		 * need to reset the PHB, so convert the ER to a
+		 * PHB-fatal error in that case.
+		 */
+ if (p->err.err_class == PHB3_ERR_CLASS_ER) {
+ fir = phb3_read_reg_asb(p, PHB_LEM_FIR_ACCUM);
+ if (fir & PPC_BIT(60)) {
+ phb3_pcicfg_read32(&p->phb, 0,
+ p->aercap + PCIECAP_AER_UE_STATUS, &cfg32);
+ if (cfg32 & PCIECAP_AER_UE_MALFORMED_TLP)
+ p->err.err_class = PHB3_ERR_CLASS_FENCED;
+ }
+ }
+
+ switch (p->err.err_class) {
+ case PHB3_ERR_CLASS_DEAD:
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_DEAD;
+ break;
+ case PHB3_ERR_CLASS_FENCED:
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_PHB_FENCED;
+ break;
+ case PHB3_ERR_CLASS_ER:
+ *pci_error_type = OPAL_EEH_PE_ERROR;
+ *severity = OPAL_EEH_SEV_PE_ER;
+
+ phb3_ioda_sel(p, IODA2_TBL_PEEV, 0, true);
+ for (i = 0; i < ARRAY_SIZE(peev); i++)
+ peev[i] = in_be64(p->regs + PHB_IODA_DATA0);
+ for (i = ARRAY_SIZE(peev) - 1; i >= 0; i--) {
+ for (j = 0; j < 64; j++) {
+ if (peev[i] & PPC_BIT(j)) {
+ *first_frozen_pe = i * 64 + j;
+ break;
+ }
+ }
+
+ if (*first_frozen_pe != (uint64_t)(-1))
+ break;
+ }
+
+ /* No frozen PE ? */
+ if (*first_frozen_pe == (uint64_t)-1) {
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ phb3_set_err_pending(p, false);
+ }
+
+ break;
+ case PHB3_ERR_CLASS_INF:
+ *pci_error_type = OPAL_EEH_PHB_ERROR;
+ *severity = OPAL_EEH_SEV_INF;
+ break;
+ default:
+ *pci_error_type = OPAL_EEH_NO_ERROR;
+ *severity = OPAL_EEH_SEV_NO_ERROR;
+ phb3_set_err_pending(p, false);
+ }
+ }
+
+ return OPAL_SUCCESS;
+}
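+
+/*
+ * Sketch of the expected EEH flow through the three callbacks above
+ * (the real driver of this sequence is the OS):
+ *
+ *   next_error(phb, &pe, &type, &sev);
+ *   if (type == OPAL_EEH_PE_ERROR) {
+ *           eeh_freeze_status(phb, pe, &state, &etype, &sev, NULL);
+ *           ...recover the device...
+ *           eeh_freeze_clear(phb, pe,
+ *                            OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO |
+ *                            OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
+ *   }
+ */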
+
+static int64_t phb3_get_diag_data(struct phb *phb,
+ void *diag_buffer,
+ uint64_t diag_buffer_len)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ struct OpalIoPhb3ErrorData *data = diag_buffer;
+
+ if (diag_buffer_len < sizeof(struct OpalIoPhb3ErrorData))
+ return OPAL_PARAMETER;
+ if (p->state == PHB3_STATE_BROKEN)
+ return OPAL_HARDWARE;
+
+ /*
+ * Dummy check for fence so that phb3_read_phb_status knows
+ * whether to use ASB or AIB
+ */
+ phb3_fenced(p);
+ phb3_read_phb_status(p, data);
+
+	/*
+	 * We most likely got here because of an error (INF class).
+	 * In that case we need to clear the error explicitly.
+	 */
+ if (phb3_err_pending(p) &&
+ p->err.err_class == PHB3_ERR_CLASS_INF &&
+ p->err.err_src == PHB3_ERR_SRC_PHB) {
+ phb3_err_ER_clear(p);
+ phb3_set_err_pending(p, false);
+ }
+
+ return OPAL_SUCCESS;
+}
+
+static uint64_t capp_fsp_lid_load(void)
+{
+ uint32_t lid_no = 0x80a02001; /* murano dd2.1 */
+
+#define CAPP_UCODE_MAX_SIZE 0x4000
+ void *data = malloc(CAPP_UCODE_MAX_SIZE);
+ size_t size;
+ int rc;
+ if (!data) {
+		prerror("PHB3: Failed to allocate memory for capp ucode lid\n");
+ return 0;
+ }
+ size = CAPP_UCODE_MAX_SIZE;
+ rc = fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid_no, 0, data, &size);
+ if (rc) {
+ prerror("PHB3: Error %d loading capp ucode lid\n", rc);
+ free(data);
+ return 0;
+ }
+
+ return (uint64_t)data;
+}
+
+static int64_t capp_load_ucode(struct phb3 *p)
+{
+
+ struct capp_ucode_lid_hdr *ucode_hdr;
+ struct capp_ucode_data_hdr *data_hdr;
+ uint64_t data, *val;
+ int size_read = 0;
+ int i;
+
+	/* If no FSP is present, p->capp_ucode_base comes from the device tree */
+ if (fsp_present() && (p->capp_ucode_base == 0))
+ p->capp_ucode_base = capp_fsp_lid_load();
+
+ if (p->capp_ucode_base == 0) {
+ PHBERR(p, "capp ucode base address not set\n");
+ return OPAL_HARDWARE;
+ }
+
+ PHBINF(p, "Loading capp microcode @%llx\n", p->capp_ucode_base);
+ ucode_hdr = (struct capp_ucode_lid_hdr *)(p->capp_ucode_base);
+	if (ucode_hdr->eyecatcher != 0x43415050554c4944) { /* "CAPPULID" */
+ PHBERR(p, "capi ucode lid header eyecatcher not found\n");
+ return OPAL_HARDWARE;
+ }
+
+ data_hdr = (struct capp_ucode_data_hdr *)((uint64_t)ucode_hdr + sizeof(*ucode_hdr));
+ while (size_read < ucode_hdr->data_size) {
+		if (data_hdr->eyecatcher != 0x4341505055434F44) { /* "CAPPUCOD" */
+ PHBERR(p, "capi ucode data header eyecatcher not found!\n");
+ return OPAL_HARDWARE;
+ }
+
+ val = (uint64_t *)data_hdr + sizeof(*data_hdr)/sizeof(uint64_t);
+ if (data_hdr->reg == apc_master_cresp) {
+ xscom_write(p->chip_id, CAPP_APC_MASTER_ARRAY_ADDR_REG, 0);
+ for (i = 0; i < data_hdr->num_data_chunks; i++)
+ xscom_write(p->chip_id, CAPP_APC_MASTER_ARRAY_WRITE_REG, *val++);
+ xscom_read(p->chip_id, CAPP_APC_MASTER_ARRAY_ADDR_REG, &data);
+ } else if (data_hdr->reg == apc_master_uop_table) {
+ xscom_write(p->chip_id, CAPP_APC_MASTER_ARRAY_ADDR_REG, 0x180ULL << 52);
+ for (i = 0; i < data_hdr->num_data_chunks; i++)
+ xscom_write(p->chip_id, CAPP_APC_MASTER_ARRAY_WRITE_REG, *val++);
+ xscom_read(p->chip_id, CAPP_APC_MASTER_ARRAY_ADDR_REG, &data);
+ } else if (data_hdr->reg == snp_ttype) {
+ xscom_write(p->chip_id, CAPP_SNP_ARRAY_ADDR_REG, 0x5000ULL << 48);
+ for (i = 0; i < data_hdr->num_data_chunks; i++)
+ xscom_write(p->chip_id, CAPP_SNP_ARRAY_WRITE_REG, *val++);
+ xscom_read(p->chip_id, CAPP_SNP_ARRAY_ADDR_REG, &data);
+ } else if (data_hdr->reg == snp_uop_table) {
+ xscom_write(p->chip_id, CAPP_SNP_ARRAY_ADDR_REG, 0x4000ULL << 48);
+ for (i = 0; i < data_hdr->num_data_chunks; i++)
+ xscom_write(p->chip_id, CAPP_SNP_ARRAY_WRITE_REG, *val++);
+ xscom_read(p->chip_id, CAPP_SNP_ARRAY_ADDR_REG, &data);
+ }
+
+ size_read += sizeof(*data_hdr) + data_hdr->num_data_chunks * 8;
+ data_hdr = (struct capp_ucode_data_hdr *)((uint64_t *)data_hdr +
+ sizeof(*data_hdr)/8 + data_hdr->num_data_chunks);
+ }
+
+ p->capp_ucode_loaded = true;
+ return OPAL_SUCCESS;
+}
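+
+/*
+ * Layout of the CAPP ucode lid as consumed above (inferred from the
+ * walker, not taken from a spec):
+ *
+ *   struct capp_ucode_lid_hdr     (eyecatcher "CAPPULID")
+ *   struct capp_ucode_data_hdr    (eyecatcher "CAPPUCOD")
+ *   uint64_t data[num_data_chunks]
+ *   ...further data_hdr/data pairs until data_size is consumed
+ */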
+
+static void phb3_init_capp_regs(struct phb3 *p)
+{
+	/* These values are written directly based on lab procedures,
+	 * but some of them are also included in the microcode and
+	 * still need investigation */
+
+ /* port0 port1
+ * 100 PHB0 disabled
+ * we're told it's the same for Venice
+ */
+ xscom_write(p->chip_id, APC_MASTER_PB_CTRL, 0x10000000000000FF);
+ xscom_write(p->chip_id, APC_MASTER_CONFIG, 0x4070000000000000);
+
+ /* tlb and mmio */
+ xscom_write(p->chip_id, TRANSPORT_CONTROL, 0x4028000100000000);
+
+ xscom_write(p->chip_id, CANNED_PRESP_MAP0, 0);
+ xscom_write(p->chip_id, CANNED_PRESP_MAP1, 0xFFFFFFFF00000000);
+ xscom_write(p->chip_id, CANNED_PRESP_MAP2, 0);
+
+ /* error recovery */
+ xscom_write(p->chip_id, CAPP_ERR_STATUS_CTRL, 0);
+
+ xscom_write(p->chip_id, FLUSH_SUE_STATE_MAP, 0x0ABCDEF000000000);
+ xscom_write(p->chip_id, CAPP_EPOCH_TIMER_CTRL, 0x00000000FFF8FFE0);
+ xscom_write(p->chip_id, FLUSH_UOP_CONFIG1, 0xB188280728000000);
+ xscom_write(p->chip_id, FLUSH_UOP_CONFIG2, 0xB188400F00000000);
+ xscom_write(p->chip_id, SNOOP_CAPI_CONFIG, 0x01F0000000000000);
+}
+
+/* override some inits with CAPI defaults */
+static void phb3_init_capp_errors(struct phb3 *p)
+{
+ out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE, 0xffffffdd0c80ffc0);
+ out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE, 0x9cf3fe08f8dc700f);
+ out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE, 0xffff57fbff01ffde);
+ out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0xfcffe0fbff7ff0ec);
+}
+
+static int64_t phb3_set_capi_mode(struct phb *phb, uint64_t mode,
+ uint64_t pe_number)
+{
+ struct phb3 *p = phb_to_phb3(phb);
+ uint64_t reg;
+ int i;
+
+ if (mode != 1)
+ return OPAL_PARAMETER;
+
+ /* poll cqstat */
+ for (i = 0; i < 500; i++) {
+ xscom_read(p->chip_id, p->pe_xscom + 0xf, &reg);
+ if (!(reg & 0xC000000000000000))
+ break;
+ time_wait_ms(10);
+ }
+ if (reg & 0xC000000000000000) {
+ PHBERR(p, "Timeout waiting for pending transaction\n");
+ return OPAL_HARDWARE;
+ }
+
+ xscom_write(p->chip_id, p->spci_xscom + 0x3, 0x8000000000000000ull);
+ /* FIXME security timer bar
+ xscom_write(p->chip_id, p->spci_xscom + 0x4, 0x8000000000000000ull);
+ */
+
+ /* aib mode */
+ xscom_read(p->chip_id, p->pci_xscom + 0xf, &reg);
+ reg &= ~PPC_BITMASK(6,7);
+ reg |= PPC_BIT(8);
+ reg |= PPC_BITMASK(40, 41);
+ reg &= ~PPC_BIT(42);
+ xscom_write(p->chip_id, p->pci_xscom + 0xf, reg);
+
+ /* pci hwconf0 */
+ xscom_read(p->chip_id, p->pe_xscom + 0x18, &reg);
+ reg |= PPC_BIT(14);
+ reg &= ~PPC_BIT(15);
+ xscom_write(p->chip_id, p->pe_xscom + 0x18, reg);
+
+ /* pci hwconf1 */
+ xscom_read(p->chip_id, p->pe_xscom + 0x19, &reg);
+ reg &= ~PPC_BITMASK(17,18);
+ xscom_write(p->chip_id, p->pe_xscom + 0x19, reg);
+
+ /* aib tx cmd cred */
+ xscom_read(p->chip_id, p->pci_xscom + 0xd, &reg);
+ reg &= ~PPC_BITMASK(42,46);
+ reg |= PPC_BIT(47);
+ xscom_write(p->chip_id, p->pci_xscom + 0xd, reg);
+
+ xscom_write(p->chip_id, p->pci_xscom + 0xc, 0xff00000000000000ull);
+
+ /* pci mode ctl */
+ xscom_read(p->chip_id, p->pe_xscom + 0xb, &reg);
+ reg |= PPC_BIT(25);
+ xscom_write(p->chip_id, p->pe_xscom + 0xb, reg);
+
+	/* Set TVE in no-translate mode to allow the MMIO window through */
+	memset(p->tve_cache, 0x0, sizeof(p->tve_cache));
+	/* Allow address range 0x0002000000000000..0x0002FFFFFFFFFFFF */
+	p->tve_cache[pe_number * 2] = 0x000000FFFFFF0a00ULL;
+
+ phb3_ioda_sel(p, IODA2_TBL_TVT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->tve_cache); i++)
+ out_be64(p->regs + PHB_IODA_DATA0, p->tve_cache[i]);
+
+	/* Set the M64 BAR to pass the MMIO window through */
+ memset(p->m64b_cache, 0x0, sizeof(p->m64b_cache));
+ p->m64b_cache[0] = PPC_BIT(0); /*enable*/
+ p->m64b_cache[0] |= PPC_BIT(1); /*single pe*/
+ p->m64b_cache[0] |= (p->mm0_base << 12) | ((pe_number & 0x3e0) << 27); /*base and upper pe*/
+ p->m64b_cache[0] |= 0x3fffc000 | (pe_number & 0x1f); /*mask and lower pe*/
+
+ p->m64b_cache[1] = PPC_BIT(0); /*enable*/
+ p->m64b_cache[1] |= PPC_BIT(1); /*single pe*/
+ p->m64b_cache[1] |= (0x0002000000000000ULL << 12) | ((pe_number & 0x3e0) << 27); /*base and upper pe*/
+ p->m64b_cache[1] |= 0x3f000000 | (pe_number & 0x1f); /*mask and lower pe*/
+
+ phb3_ioda_sel(p, IODA2_TBL_M64BT, 0, true);
+ for (i = 0; i < ARRAY_SIZE(p->m64b_cache); i++)
+ out_be64(p->regs + PHB_IODA_DATA0, p->m64b_cache[i]);
+
+ out_be64(p->regs + PHB_PHB3_CONFIG, PHB_PHB3C_64B_TCE_EN);
+ out_be64(p->regs + PHB_PHB3_CONFIG, PHB_PHB3C_64BIT_MSI_EN);
+
+ phb3_init_capp_errors(p);
+
+ phb3_init_capp_regs(p);
+ return OPAL_SUCCESS;
+}
+
+static const struct phb_ops phb3_ops = {
+ .lock = phb3_lock,
+ .unlock = phb3_unlock,
+ .cfg_read8 = phb3_pcicfg_read8,
+ .cfg_read16 = phb3_pcicfg_read16,
+ .cfg_read32 = phb3_pcicfg_read32,
+ .cfg_write8 = phb3_pcicfg_write8,
+ .cfg_write16 = phb3_pcicfg_write16,
+ .cfg_write32 = phb3_pcicfg_write32,
+ .choose_bus = phb3_choose_bus,
+ .device_init = phb3_device_init,
+ .presence_detect = phb3_presence_detect,
+ .ioda_reset = phb3_ioda_reset,
+ .pci_reinit = phb3_pci_reinit,
+ .set_phb_mem_window = phb3_set_phb_mem_window,
+ .phb_mmio_enable = phb3_phb_mmio_enable,
+ .map_pe_mmio_window = phb3_map_pe_mmio_window,
+ .map_pe_dma_window = phb3_map_pe_dma_window,
+ .map_pe_dma_window_real = phb3_map_pe_dma_window_real,
+ .pci_msi_eoi = phb3_pci_msi_eoi,
+ .set_xive_pe = phb3_set_ive_pe,
+ .get_msi_32 = phb3_get_msi_32,
+ .get_msi_64 = phb3_get_msi_64,
+ .set_pe = phb3_set_pe,
+ .set_peltv = phb3_set_peltv,
+ .link_state = phb3_link_state,
+ .power_state = phb3_power_state,
+ .slot_power_off = phb3_slot_power_off,
+ .slot_power_on = phb3_slot_power_on,
+ .hot_reset = phb3_hot_reset,
+ .fundamental_reset = phb3_fundamental_reset,
+ .complete_reset = phb3_complete_reset,
+ .poll = phb3_poll,
+ .eeh_freeze_status = phb3_eeh_freeze_status,
+ .eeh_freeze_clear = phb3_eeh_freeze_clear,
+ .next_error = phb3_eeh_next_error,
+ .get_diag_data = NULL,
+ .get_diag_data2 = phb3_get_diag_data,
+ .set_capi_mode = phb3_set_capi_mode,
+};
+
+/*
+ * These registers must be accessed through the ASB at this stage
+ * since the AIB isn't ready yet.
+ */
+static void phb3_setup_aib(struct phb3 *p)
+{
+ /* Init_2 - AIB TX Channel Mapping Register */
+ phb3_write_reg_asb(p, PHB_AIB_TX_CHAN_MAPPING, 0x0211230000000000);
+
+ /* Init_3 - AIB RX command credit register */
+ if (p->rev >= PHB3_REV_VENICE_DD20)
+ phb3_write_reg_asb(p, PHB_AIB_RX_CMD_CRED, 0x0020000100020001);
+ else
+ phb3_write_reg_asb(p, PHB_AIB_RX_CMD_CRED, 0x0020000100010001);
+
+ /* Init_4 - AIB rx data credit register */
+ if (p->rev >= PHB3_REV_VENICE_DD20)
+ phb3_write_reg_asb(p, PHB_AIB_RX_DATA_CRED, 0x0020002000010001);
+ else
+ phb3_write_reg_asb(p, PHB_AIB_RX_DATA_CRED, 0x0020002000000001);
+
+ /* Init_5 - AIB rx credit init timer register */
+ phb3_write_reg_asb(p, PHB_AIB_RX_CRED_INIT_TIMER, 0x0f00000000000000);
+
+ /* Init_6 - AIB Tag Enable register */
+ phb3_write_reg_asb(p, PHB_AIB_TAG_ENABLE, 0xffffffff00000000);
+
+ /* Init_7 - TCE Tag Enable register */
+ phb3_write_reg_asb(p, PHB_TCE_TAG_ENABLE, 0xffffffff00000000);
+}
+
+static void phb3_init_ioda2(struct phb3 *p)
+{
+ /* Init_14 - LSI Source ID */
+ out_be64(p->regs + PHB_LSI_SOURCE_ID,
+ SETFIELD(PHB_LSI_SRC_ID, 0ul, 0xff));
+
+ /* Init_15 - IVT BAR / Length
+ * Init_16 - RBA BAR
+ * - RTT BAR
+ * Init_17 - PELT-V BAR
+ */
+ out_be64(p->regs + PHB_RTT_BAR,
+ p->tbl_rtt | PHB_RTT_BAR_ENABLE);
+ out_be64(p->regs + PHB_PELTV_BAR,
+ p->tbl_peltv | PHB_PELTV_BAR_ENABLE);
+ out_be64(p->regs + PHB_IVT_BAR,
+ p->tbl_ivt | 0x800 | PHB_IVT_BAR_ENABLE);
+
+	/* DD2.0 and subsequent chips don't have a memory
+	 * resident RBA.
+ */
+ if (p->rev >= PHB3_REV_MURANO_DD20)
+ out_be64(p->regs + PHB_RBA_BAR, 0x0ul);
+ else
+ out_be64(p->regs + PHB_RBA_BAR,
+ p->tbl_rba | PHB_RBA_BAR_ENABLE);
+
+ /* Init_18..21 - Setup M32 */
+ out_be64(p->regs + PHB_M32_BASE_ADDR, p->mm1_base);
+ out_be64(p->regs + PHB_M32_BASE_MASK, ~(M32_PCI_SIZE - 1));
+ out_be64(p->regs + PHB_M32_START_ADDR, M32_PCI_START);
+
+ /* Init_22 - Setup PEST BAR */
+ out_be64(p->regs + PHB_PEST_BAR,
+ p->tbl_pest | PHB_PEST_BAR_ENABLE);
+
+ /* Init_23 - PCIE Outbound upper address */
+ out_be64(p->regs + PHB_M64_UPPER_BITS, 0);
+
+ /* Init_24 - Interrupt represent timers
+ * The register doesn't take effect on Murano DD1.0
+ */
+ if (p->rev >= PHB3_REV_MURANO_DD20)
+ out_be64(p->regs + PHB_INTREP_TIMER, 0x0004000000000000);
+ else
+ out_be64(p->regs + PHB_INTREP_TIMER, 0);
+
+ /* Init_25 - PHB3 Configuration Register. Clear TCE cache then
+ * configure the PHB
+ */
+ out_be64(p->regs + PHB_PHB3_CONFIG, PHB_PHB3C_64B_TCE_EN);
+ out_be64(p->regs + PHB_PHB3_CONFIG,
+ PHB_PHB3C_M32_EN | PHB_PHB3C_32BIT_MSI_EN |
+ PHB_PHB3C_64BIT_MSI_EN);
+
+ /* Init_26 - At least 512ns delay according to spec */
+ time_wait_ms(1);
+
+ /* Init_27..36 - On-chip IODA tables init */
+ phb3_ioda_reset(&p->phb, false);
+}
+
+static bool phb3_wait_dlp_reset(struct phb3 *p)
+{
+ unsigned int i;
+ uint64_t val;
+
+ /*
+ * Firmware cannot access the UTL core regs or PCI config space
+ * until the cores are out of DL_PGRESET.
+ * DL_PGRESET should be polled until it is inactive with a value
+ * of '0'. The recommended polling frequency is once every 1ms.
+ * Firmware should poll at least 200 attempts before giving up.
+ * MMIO Stores to the link are silently dropped by the UTL core if
+ * the link is down.
+ * MMIO Loads to the link will be dropped by the UTL core and will
+ * eventually time-out and will return an all ones response if the
+ * link is down.
+ */
+#define DLP_RESET_ATTEMPTS 400
+
+ PHBDBG(p, "Waiting for DLP PG reset to complete...\n");
+ for (i = 0; i < DLP_RESET_ATTEMPTS; i++) {
+ val = in_be64(p->regs + PHB_PCIE_DLP_TRAIN_CTL);
+ if (!(val & PHB_PCIE_DLP_TC_DL_PGRESET))
+ break;
+ time_wait_ms(1);
+ }
+ if (val & PHB_PCIE_DLP_TC_DL_PGRESET) {
+ PHBERR(p, "Timeout waiting for DLP PG reset !\n");
+ return false;
+ }
+ return true;
+}
+
+/* phb3_init_rc_cfg - Initialize the Root Complex config space
+ */
+static bool phb3_init_rc_cfg(struct phb3 *p)
+{
+ int64_t ecap, aercap;
+
+ /* XXX Handle errors ? */
+
+ /* Init_45..46:
+ *
+ * Set primary bus to 0, secondary to 1 and subordinate to 0xff
+ */
+ phb3_pcicfg_write32(&p->phb, 0, PCI_CFG_PRIMARY_BUS, 0x00ff0100);
+
+ /* Init_47..52
+ *
+ * IO and Memory base & limits are set to base > limit, which
+ * allows all inbounds.
+ *
+ * XXX This has the potential of confusing the OS which might
+ * think that nothing is forwarded downstream. We probably need
+ * to fix this to match the IO and M32 PHB windows
+ */
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_IO_BASE, 0x0010);
+ phb3_pcicfg_write32(&p->phb, 0, PCI_CFG_MEM_BASE, 0x00000010);
+ phb3_pcicfg_write32(&p->phb, 0, PCI_CFG_PREF_MEM_BASE, 0x00000010);
+
+ /* Init_53..54 - Setup bridge control enable forwarding of CORR, FATAL,
+ * and NONFATAL errors
+ */
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_BRCTL, PCI_CFG_BRCTL_SERR_EN);
+
+ /* Init_55..56
+ *
+ * PCIE Device control/status, enable error reporting, disable relaxed
+ * ordering, set MPS to 128 (see note), clear errors.
+ *
+	 * Note: The doc recommends setting MPS to 4K. This has proved to have
+	 * some issues as it requires specific clamping of MRSS on devices and
+ * we've found devices in the field that misbehave when doing that.
+ *
+ * We currently leave it all to 128 bytes (minimum setting) at init
+ * time. The generic PCIe probing later on might apply a different
+ * value, or the kernel will, but we play it safe at early init
+ */
+ if (p->ecap <= 0) {
+ ecap = pci_find_cap(&p->phb, 0, PCI_CFG_CAP_ID_EXP);
+ if (ecap < 0) {
+ PHBERR(p, "Can't locate PCI-E capability\n");
+ return false;
+ }
+ p->ecap = ecap;
+ } else {
+ ecap = p->ecap;
+ }
+
+ phb3_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVSTAT,
+ PCICAP_EXP_DEVSTAT_CE |
+ PCICAP_EXP_DEVSTAT_NFE |
+ PCICAP_EXP_DEVSTAT_FE |
+ PCICAP_EXP_DEVSTAT_UE);
+
+ phb3_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DEVCTL,
+ PCICAP_EXP_DEVCTL_CE_REPORT |
+ PCICAP_EXP_DEVCTL_NFE_REPORT |
+ PCICAP_EXP_DEVCTL_FE_REPORT |
+ PCICAP_EXP_DEVCTL_UR_REPORT |
+ SETFIELD(PCICAP_EXP_DEVCTL_MPS, 0, PCIE_MPS_128B));
+
+ /* Init_57..58
+ *
+ * Root Control Register. Enable error reporting
+ *
+ * Note: Added CRS visibility.
+ */
+ phb3_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_RC,
+ PCICAP_EXP_RC_SYSERR_ON_CE |
+ PCICAP_EXP_RC_SYSERR_ON_NFE |
+ PCICAP_EXP_RC_SYSERR_ON_FE |
+ PCICAP_EXP_RC_CRS_VISIBLE);
+
+ /* Init_59..60
+ *
+ * Device Control 2. Enable ARI fwd, set timer to RTOS timer
+ */
+ phb3_pcicfg_write16(&p->phb, 0, ecap + PCICAP_EXP_DCTL2,
+ SETFIELD(PCICAP_EXP_DCTL2_CMPTOUT, 0, 0xf) |
+ PCICAP_EXP_DCTL2_ARI_FWD);
+
+ /* Init_61..76
+ *
+ * AER inits
+ */
+ aercap = pci_find_ecap(&p->phb, 0, PCIECAP_ID_AER, NULL);
+ if (aercap < 0) {
+ /* Shouldn't happen */
+		PHBERR(p, "Failed to locate AER extended capability in bridge\n");
+ return false;
+ }
+ p->aercap = aercap;
+
+ /* Clear all UE status */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_STATUS,
+ 0xffffffff);
+ /* Disable some error reporting as per the PHB3 spec */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_MASK,
+ PCIECAP_AER_UE_POISON_TLP |
+ PCIECAP_AER_UE_COMPL_TIMEOUT |
+ PCIECAP_AER_UE_COMPL_ABORT |
+ PCIECAP_AER_UE_ECRC);
+ /* Report some errors as fatal */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_UE_SEVERITY,
+ PCIECAP_AER_UE_DLP |
+ PCIECAP_AER_UE_SURPRISE_DOWN |
+ PCIECAP_AER_UE_FLOW_CTL_PROT |
+ PCIECAP_AER_UE_UNEXP_COMPL |
+ PCIECAP_AER_UE_RECV_OVFLOW |
+ PCIECAP_AER_UE_MALFORMED_TLP);
+ /* Clear all CE status */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_STATUS,
+ 0xffffffff);
+ /* Disable some error reporting as per the PHB3 spec */
+ /* Note: When link down, also disable rcvr errors */
+	phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CE_MASK,
+			    PCIECAP_AER_CE_ADV_NONFATAL |
+			    (p->has_link ? 0 : PCIECAP_AER_CE_RECVR_ERR));
+ /* Enable ECRC generation & checking */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_CAPCTL,
+ PCIECAP_AER_CAPCTL_ECRCG_EN |
+ PCIECAP_AER_CAPCTL_ECRCC_EN);
+ /* Enable reporting in root error control */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_CMD,
+ PCIECAP_AER_RERR_CMD_FE |
+ PCIECAP_AER_RERR_CMD_NFE |
+ PCIECAP_AER_RERR_CMD_CE);
+ /* Clear root error status */
+ phb3_pcicfg_write32(&p->phb, 0, aercap + PCIECAP_AER_RERR_STA,
+ 0xffffffff);
+
+ return true;
+}
+
+static void phb3_init_utl(struct phb3 *p)
+{
+	/* Init_77..79: Clear spurious errors and assign errors to the
+ * right "interrupt" signal
+ */
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_ERR_SEVERITY, 0x5000000000000000);
+ out_be64(p->regs + UTL_SYS_BUS_AGENT_IRQ_EN, 0xfcc0000000000000);
+
+ /* Init_80..81: Setup tag allocations
+ *
+	 * Don't touch UTL_GBIF_READ_TAGS_ALLOC, it differs between PHBs
+ * and the default is correct
+ */
+ out_be64(p->regs + UTL_PCIE_TAGS_ALLOC, 0x0800000000000000);
+
+ /* Init_82: PCI Express port control */
+ out_be64(p->regs + UTL_PCIE_PORT_CONTROL, 0x8588006000000000);
+
+ /* Init_83..85: Clean & setup port errors */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffdfffffffffffff);
+ out_be64(p->regs + UTL_PCIE_PORT_ERROR_SEV, 0x5039000000000000);
+
+ if (p->has_link)
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad5a800000000000);
+ else
+ out_be64(p->regs + UTL_PCIE_PORT_IRQ_EN, 0xad42800000000000);
+
+ /* Init_86 : Cleanup RC errors */
+ out_be64(p->regs + UTL_RC_STATUS, 0xffffffffffffffff);
+}
+
+static void phb3_init_errors(struct phb3 *p)
+{
+ /* Init_88: LEM Error Mask : Temporarily disable error interrupts */
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0xffffffffffffffff);
+
+ /* Init_89..97: Disable all error interrupts until end of init */
+ out_be64(p->regs + PHB_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_LEM_ENABLE, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_ERR_FREEZE_ENABLE, 0x0000000080800000);
+ out_be64(p->regs + PHB_ERR_AIB_FENCE_ENABLE, 0xffffffdd0c00ffc0);
+ out_be64(p->regs + PHB_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_98_106: Configure MMIO error traps & clear old state
+ *
+ * Don't enable BAR multi-hit detection in bit 41.
+ */
+ out_be64(p->regs + PHB_OUT_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_LEM_ENABLE, 0xfdffffffffbfffff);
+ out_be64(p->regs + PHB_OUT_ERR_FREEZE_ENABLE, 0x0000420800000000);
+ out_be64(p->regs + PHB_OUT_ERR_AIB_FENCE_ENABLE, 0x9cf3bc00f89c700f);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_OUT_ERR_STATUS_MASK, 0x0000000000400000);
+ out_be64(p->regs + PHB_OUT_ERR1_STATUS_MASK, 0x0000000000400000);
+
+ /* Init_107_115: Configure DMA_A error traps & clear old state */
+ out_be64(p->regs + PHB_INA_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_LEM_ENABLE, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INA_ERR_FREEZE_ENABLE, 0xc00003a901006000);
+ out_be64(p->regs + PHB_INA_ERR_AIB_FENCE_ENABLE, 0x3fff5452fe019fde);
+ out_be64(p->regs + PHB_INA_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INA_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_116_124: Configure DMA_B error traps & clear old state */
+ out_be64(p->regs + PHB_INB_ERR_STATUS, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_LEM_ENABLE, 0xffffffffffffffff);
+
+ /*
+ * Workaround for errata HW257476, turn correctable messages into
+ * ER freezes on Murano and Venice DD1.0
+ */
+ if (p->rev < PHB3_REV_MURANO_DD20)
+ out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE,
+ 0x0000600000000070);
+ else
+ out_be64(p->regs + PHB_INB_ERR_FREEZE_ENABLE,
+ 0x0000600000000060);
+
+ out_be64(p->regs + PHB_INB_ERR_AIB_FENCE_ENABLE, 0xfcff80fbff7ff08c);
+ out_be64(p->regs + PHB_INB_ERR_LOG_0, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_LOG_1, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR_STATUS_MASK, 0x0000000000000000);
+ out_be64(p->regs + PHB_INB_ERR1_STATUS_MASK, 0x0000000000000000);
+
+ /* Init_125..128: Cleanup & configure LEM */
+ out_be64(p->regs + PHB_LEM_FIR_ACCUM, 0x0000000000000000);
+ out_be64(p->regs + PHB_LEM_ACTION0, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_LEM_ACTION1, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_LEM_WOF, 0x0000000000000000);
+}
+
+static void phb3_init_hw(struct phb3 *p)
+{
+ uint64_t val;
+
+ PHBDBG(p, "Initializing PHB...\n");
+
+ /* Lift reset */
+ xscom_read(p->chip_id, p->spci_xscom + 1, &val);/* HW275117 */
+ xscom_write(p->chip_id, p->pci_xscom + 0xa, 0);
+ time_wait_ms(100);
+
+ /* Grab version and fit it in an int */
+ val = phb3_read_reg_asb(p, PHB_VERSION);
+ if (val == 0 || val == 0xffffffffffffffff) {
+ PHBERR(p, "Failed to read version, PHB appears broken\n");
+ goto failed;
+ }
+
+ p->rev = ((val >> 16) & 0x00ff0000) | (val & 0xffff);
+ PHBDBG(p, "Core revision 0x%x\n", p->rev);
+
+ /* Setup AIB credits etc... */
+ phb3_setup_aib(p);
+
+ /* Init_8 - PCIE System Configuration Register
+ *
+ * Not changed from default values. Beware that bits [04:09] should
+ * be different between PHBs (x16 vs x8).
+ */
+ PHBDBG(p, "Default system config: 0x%016llx\n",
+ in_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG));
+ if (p->index == 2)
+ val = 0x421000fc00000000;
+ else
+ val = 0x441000fc00000000;
+ val |= (uint64_t)p->max_link_speed << PPC_BITLSHIFT(35);
+ out_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG, val);
+
+ PHBDBG(p, "New system config : 0x%016llx\n",
+ in_be64(p->regs + PHB_PCIE_SYSTEM_CONFIG));
+
+ /* Init_9..12 - PCIE DLP Lane EQ control */
+ if (p->lane_eq) {
+ out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL0,
+ be64_to_cpu(p->lane_eq[0]));
+ out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL1,
+ be64_to_cpu(p->lane_eq[1]));
+ out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL2,
+ be64_to_cpu(p->lane_eq[2]));
+ out_be64(p->regs + PHB_PCIE_LANE_EQ_CNTL3,
+ be64_to_cpu(p->lane_eq[3]));
+ }
+
+ /* Init_XX - (PHB2 errata)
+ *
+ * Set proper credits, needs adjustment due to wrong defaults
+ * on PHB2 before we lift the reset.
+ */
+ if (p->index == 2)
+ out_be64(p->regs + PHB_PCIE_SYS_LINK_INIT, 0x9008133332120000);
+
+ /* Init_13 - PCIE Reset */
+ /*
+ * Lift the PHB resets but not PERST, this will be lifted
+ * later by the initial PERST state machine
+ */
+ PHBDBG(p, "PHB_RESET is 0x%016llx\n", in_be64(p->regs + PHB_RESET));
+ out_be64(p->regs + PHB_RESET, 0xd000000000000000);
+
+ /* Architected IODA2 inits */
+ phb3_init_ioda2(p);
+
+ /* Init_37..42 - Clear UTL & DLP error logs */
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG1, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG2, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG3, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_UTL_ERRLOG4, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG1, 0xffffffffffffffff);
+ out_be64(p->regs + PHB_PCIE_DLP_ERRLOG2, 0xffffffffffffffff);
+
+ /* Init_43 - Wait for UTL core to come out of reset */
+ if (!phb3_wait_dlp_reset(p))
+ goto failed;
+
+ /* Init_44 - Clear port status */
+ out_be64(p->regs + UTL_PCIE_PORT_STATUS, 0xffffffffffffffff);
+
+ /* Init_45..76: Init root complex config space */
+ if (!phb3_init_rc_cfg(p))
+ goto failed;
+
+ /* Init_77..86 : Init UTL */
+ phb3_init_utl(p);
+
+ /*
+ * Init_87: PHB Control register. Various PHB settings
+	 * Enable IVC for Murano DD2.0 or later
+ */
+#ifdef IVT_TABLE_IVE_16B
+ val = 0xf3a80e4b00000000;
+#else
+ val = 0xf3a80ecb00000000;
+#endif
+ if (p->rev >= PHB3_REV_MURANO_DD20)
+ val |= 0x0000010000000000;
+ out_be64(p->regs + PHB_CONTROL, val);
+
+ /* Init_88..128 : Setup error registers */
+ phb3_init_errors(p);
+
+ /* Init_129: Read error summary */
+ val = in_be64(p->regs + PHB_ETU_ERR_SUMMARY);
+ if (val) {
+		PHBERR(p, "Errors detected during PHB init: 0x%016llx\n", val);
+ goto failed;
+ }
+
+ /* NOTE: At this point the spec waits for the link to come up. We
+ * don't bother as we are doing a PERST soon.
+ */
+
+ /* XXX I don't know why the spec does this now and not earlier, so
+ * to be sure to get it right we might want to move it to the freset
+ * state machine, though the generic PCI layer will probably do
+ * this anyway (ie, enable MEM, etc... in the RC)
+ *
+	 * Note: The spec enables IO, but PHB3 doesn't do IO space, so we
+ * leave that clear.
+ */
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_CMD,
+ PCI_CFG_CMD_MEM_EN |
+ PCI_CFG_CMD_BUS_MASTER_EN |
+ PCI_CFG_CMD_PERR_RESP |
+ PCI_CFG_CMD_SERR_EN);
+
+ /* Clear errors */
+ phb3_pcicfg_write16(&p->phb, 0, PCI_CFG_STAT,
+ PCI_CFG_STAT_SENT_TABORT |
+ PCI_CFG_STAT_RECV_TABORT |
+ PCI_CFG_STAT_RECV_MABORT |
+ PCI_CFG_STAT_SENT_SERR |
+ PCI_CFG_STAT_RECV_PERR);
+
+ /* Init_136 - Re-enable error interrupts */
+
+ /* TBD: Should we mask any of these for PERST ? */
+ out_be64(p->regs + PHB_ERR_IRQ_ENABLE, 0x0000002280b80000);
+ out_be64(p->regs + PHB_OUT_ERR_IRQ_ENABLE, 0x600c42fc042080f0);
+ out_be64(p->regs + PHB_INA_ERR_IRQ_ENABLE, 0xc000a3a901826020);
+ out_be64(p->regs + PHB_INB_ERR_IRQ_ENABLE, 0x0000600000800070);
+ out_be64(p->regs + PHB_LEM_ERROR_MASK, 0x42498e327f502eae);
+
+ /*
+ * Init_141 - Enable DMA address speculation
+ *
+ * Errata#20131017: Disable speculation until Murano DD2.0
+ *
+ * Note: We keep IVT speculation disabled (bit 4). It should work with
+ * Murano DD2.0 and later but lacks sufficient testing. We will re-enable
+ * it once that has been done.
+ */
+ if (p->rev >= PHB3_REV_MURANO_DD20)
+ out_be64(p->regs + PHB_TCE_SPEC_CTL, 0xf000000000000000);
+ else
+ out_be64(p->regs + PHB_TCE_SPEC_CTL, 0x0ul);
+
+ /* Errata#20131017: avoid TCE queue overflow */
+ if (p->rev == PHB3_REV_MURANO_DD20)
+ phb3_write_reg_asb(p, PHB_TCE_WATERMARK, 0x0003000000030302);
+
+ /* Init_142 - PHB3 - Timeout Control Register 1 */
+ out_be64(p->regs + PHB_TIMEOUT_CTRL1, 0x1713132016200000);
+
+ /* Init_143 - PHB3 - Timeout Control Register 2 */
+ out_be64(p->regs + PHB_TIMEOUT_CTRL2, 0x2320d71600000000);
+
+ /* Mark the PHB as functional which enables all the various sequences */
+ p->state = PHB3_STATE_FUNCTIONAL;
+
+ PHBDBG(p, "Initialization complete\n");
+
+ return;
+
+ failed:
+ PHBERR(p, "Initialization failed\n");
+ p->state = PHB3_STATE_BROKEN;
+}
+
+static void phb3_allocate_tables(struct phb3 *p)
+{
+	/* XXX Our current memalign implementation sucks.
+	 *
+	 * It will do the job; however, it doesn't support freeing
+ * the memory and wastes space by always allocating twice
+ * as much as requested (size + alignment)
+ */
+ p->tbl_rtt = (uint64_t)local_alloc(p->chip_id, RTT_TABLE_SIZE, RTT_TABLE_SIZE);
+ assert(p->tbl_rtt);
+ memset((void *)p->tbl_rtt, 0, RTT_TABLE_SIZE);
+
+ p->tbl_peltv = (uint64_t)local_alloc(p->chip_id, PELTV_TABLE_SIZE, PELTV_TABLE_SIZE);
+ assert(p->tbl_peltv);
+ memset((void *)p->tbl_peltv, 0, PELTV_TABLE_SIZE);
+
+ p->tbl_pest = (uint64_t)local_alloc(p->chip_id, PEST_TABLE_SIZE, PEST_TABLE_SIZE);
+ assert(p->tbl_pest);
+ memset((void *)p->tbl_pest, 0, PEST_TABLE_SIZE);
+
+ p->tbl_ivt = (uint64_t)local_alloc(p->chip_id, IVT_TABLE_SIZE, IVT_TABLE_SIZE);
+ assert(p->tbl_ivt);
+ memset((void *)p->tbl_ivt, 0, IVT_TABLE_SIZE);
+
+ p->tbl_rba = (uint64_t)local_alloc(p->chip_id, RBA_TABLE_SIZE, RBA_TABLE_SIZE);
+ assert(p->tbl_rba);
+ memset((void *)p->tbl_rba, 0, RBA_TABLE_SIZE);
+}
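+
+/*
+ * Quick reference for the tables allocated above (roles as used
+ * elsewhere in this file):
+ *   RTT    - RID Translation Table, maps PCI RIDs to PE numbers
+ *   PELT-V - per-PE vector of the PEs to freeze alongside it
+ *   PEST   - per-PE state, holds the MMIO/DMA freeze bits
+ *   IVT    - Interrupt Vector Table for MSIs
+ *   RBA    - Reject Bit Array, unused on DD2.0 and later
+ */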
+
+static void phb3_add_properties(struct phb3 *p)
+{
+ struct dt_node *np = p->phb.dt_node;
+ uint32_t lsibase, icsp = get_ics_phandle();
+ uint64_t m32b, m64b, m64s, reg, tkill;
+
+ reg = cleanup_addr((uint64_t)p->regs);
+
+ /* Add various properties that HB doesn't have to
+ * add, some of them simply because they result from
+	 * policy decisions made in skiboot rather than in HB,
+ * such as the MMIO windows going to PCI, interrupts,
+ * etc...
+ */
+ dt_add_property_cells(np, "#address-cells", 3);
+ dt_add_property_cells(np, "#size-cells", 2);
+ dt_add_property_cells(np, "#interrupt-cells", 1);
+ dt_add_property_cells(np, "bus-range", 0, 0xff);
+ dt_add_property_cells(np, "clock-frequency", 0x200, 0); /* ??? */
+
+ dt_add_property_cells(np, "interrupt-parent", icsp);
+
+ /* XXX FIXME: add slot-name */
+ //dt_property_cell("bus-width", 8); /* Figure it out from VPD ? */
+
+ /* "ranges", we only expose M32 (PHB3 doesn't do IO)
+ *
+	 * Note: The kernel expects us to have chopped off 64k from the
+ * M32 size (for the 32-bit MSIs). If we don't do that, it will
+ * get confused (OPAL does it)
+ */
+ m32b = cleanup_addr(p->mm1_base);
+ m64b = cleanup_addr(p->mm0_base);
+ m64s = p->mm0_size;
+ dt_add_property_cells(np, "ranges",
+ /* M32 space */
+ 0x02000000, 0x00000000, M32_PCI_START,
+ hi32(m32b), lo32(m32b), 0, M32_PCI_SIZE - 0x10000);
+
+ /* XXX FIXME: add opal-memwin32, dmawins, etc... */
+ dt_add_property_cells(np, "ibm,opal-m64-window",
+ hi32(m64b), lo32(m64b),
+ hi32(m64b), lo32(m64b),
+ hi32(m64s), lo32(m64s));
+ dt_add_property(np, "ibm,opal-single-pe", NULL, 0);
+ //dt_add_property_cells(np, "ibm,opal-msi-ports", 2048);
+ dt_add_property_cells(np, "ibm,opal-num-pes", 256);
+ dt_add_property_cells(np, "ibm,opal-reserved-pe", 0);
+ dt_add_property_cells(np, "ibm,opal-msi-ranges",
+ p->base_msi, PHB3_MSI_IRQ_COUNT);
+ tkill = reg + PHB_TCE_KILL;
+ dt_add_property_cells(np, "ibm,opal-tce-kill",
+ hi32(tkill), lo32(tkill));
+
+ /*
+ * Indicate to Linux that the architected IODA2 MSI EOI method
+ * is supported
+ */
+ dt_add_property_string(np, "ibm,msi-eoi-method", "ioda2");
+
+ /* The interrupt maps will be generated in the RC node by the
+ * PCI code based on the content of this structure:
+ */
+ lsibase = p->base_lsi;
+ p->phb.lstate.int_size = 1;
+ p->phb.lstate.int_val[0][0] = lsibase + PHB3_LSI_PCIE_INTA;
+ p->phb.lstate.int_val[1][0] = lsibase + PHB3_LSI_PCIE_INTB;
+ p->phb.lstate.int_val[2][0] = lsibase + PHB3_LSI_PCIE_INTC;
+ p->phb.lstate.int_val[3][0] = lsibase + PHB3_LSI_PCIE_INTD;
+ p->phb.lstate.int_parent[0] = icsp;
+ p->phb.lstate.int_parent[1] = icsp;
+ p->phb.lstate.int_parent[2] = icsp;
+ p->phb.lstate.int_parent[3] = icsp;
+
+ /* Indicators for variable tables */
+ dt_add_property_cells(np, "ibm,opal-rtt-table",
+ hi32(p->tbl_rtt), lo32(p->tbl_rtt), RTT_TABLE_SIZE);
+ dt_add_property_cells(np, "ibm,opal-peltv-table",
+ hi32(p->tbl_peltv), lo32(p->tbl_peltv), PELTV_TABLE_SIZE);
+ dt_add_property_cells(np, "ibm,opal-pest-table",
+ hi32(p->tbl_pest), lo32(p->tbl_pest), PEST_TABLE_SIZE);
+ dt_add_property_cells(np, "ibm,opal-ivt-table",
+ hi32(p->tbl_ivt), lo32(p->tbl_ivt), IVT_TABLE_SIZE);
+ dt_add_property_cells(np, "ibm,opal-ive-stride",
+ IVT_TABLE_STRIDE);
+ dt_add_property_cells(np, "ibm,opal-rba-table",
+ hi32(p->tbl_rba), lo32(p->tbl_rba), RBA_TABLE_SIZE);
+}
+
+static bool phb3_calculate_windows(struct phb3 *p)
+{
+ const struct dt_property *prop;
+
+ /* Get PBCQ MMIO windows from device-tree */
+ prop = dt_require_property(p->phb.dt_node,
+ "ibm,mmio-window", -1);
+ assert(prop->len >= (2 * sizeof(uint64_t)));
+
+ p->mm0_base = ((const uint64_t *)prop->prop)[0];
+ p->mm0_size = ((const uint64_t *)prop->prop)[1];
+ if (prop->len > 16) {
+ p->mm1_base = ((const uint64_t *)prop->prop)[2];
+ p->mm1_size = ((const uint64_t *)prop->prop)[3];
+ }
+
+ /* Sort them so that 0 is big and 1 is small */
+ if (p->mm1_size && p->mm1_size > p->mm0_size) {
+ uint64_t b = p->mm0_base;
+ uint64_t s = p->mm0_size;
+ p->mm0_base = p->mm1_base;
+ p->mm0_size = p->mm1_size;
+ p->mm1_base = b;
+ p->mm1_size = s;
+ }
+
+ /* If 1 is too small, ditch it */
+ if (p->mm1_size < M32_PCI_SIZE)
+ p->mm1_size = 0;
+
+ /* If 1 doesn't exist, carve it out of 0 */
+ if (p->mm1_size == 0) {
+ p->mm0_size /= 2;
+ p->mm1_base = p->mm0_base + p->mm0_size;
+ p->mm1_size = p->mm0_size;
+ }
+
+ /* Crop mm1 to our desired size */
+ if (p->mm1_size > M32_PCI_SIZE)
+ p->mm1_size = M32_PCI_SIZE;
+
+ return true;
+}
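+
+/*
+ * Worked example with hypothetical values: given a single 64GB
+ * "ibm,mmio-window" starting at 0x3fe000000000, the code above keeps
+ * mm0 (M64) as the lower 32GB, carves mm1 out of the upper 32GB,
+ * then crops mm1 down to M32_PCI_SIZE for use as the M32 window.
+ */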
+
+static void phb3_create(struct dt_node *np)
+{
+ const struct dt_property *prop;
+ struct phb3 *p = zalloc(sizeof(struct phb3));
+ size_t lane_eq_len;
+ struct dt_node *iplp;
+ char *path;
+
+ assert(p);
+
+ /* Populate base stuff */
+ p->index = dt_prop_get_u32(np, "ibm,phb-index");
+ p->chip_id = dt_prop_get_u32(np, "ibm,chip-id");
+ p->regs = (void *)dt_get_address(np, 0, NULL);
+ p->base_msi = PHB3_MSI_IRQ_BASE(p->chip_id, p->index);
+ p->base_lsi = PHB3_LSI_IRQ_BASE(p->chip_id, p->index);
+ p->phb.dt_node = np;
+ p->phb.ops = &phb3_ops;
+ p->phb.phb_type = phb_type_pcie_v3;
+ p->phb.scan_map = 0x1; /* Only device 0 to scan */
+ p->capp_ucode_base = 0;
+ p->capp_ucode_loaded = false;
+ if (dt_has_node_property(np, "ibm,capp-ucode", NULL))
+ p->capp_ucode_base = dt_prop_get_u32(np, "ibm,capp-ucode");
+ p->max_link_speed = dt_prop_get_u32_def(np, "ibm,max-link-speed", 3);
+ p->state = PHB3_STATE_UNINITIALIZED;
+
+ if (!phb3_calculate_windows(p))
+ return;
+
+ /* Get the various XSCOM register bases from the device-tree */
+ prop = dt_require_property(np, "ibm,xscom-bases", 3 * sizeof(uint32_t));
+ p->pe_xscom = ((const uint32_t *)prop->prop)[0];
+ p->spci_xscom = ((const uint32_t *)prop->prop)[1];
+ p->pci_xscom = ((const uint32_t *)prop->prop)[2];
+
+ /*
+	 * We skip the initial PERST assertion requested by the generic code
+	 * when doing a cold boot, because we are just coming out of one and
+	 * skipping it saves boot time. The PERST state machine will still
+	 * handle waiting for the link to come up; it will just avoid actually
+	 * asserting & deasserting the PERST output
+ *
+ * For a hot IPL, we still do a PERST
+ *
+ * Note: In absence of property (ie, FSP-less), we stick to the old
+ * behaviour and set skip_perst to true
+ */
+ p->skip_perst = true; /* Default */
+
+ iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
+ if (iplp) {
+ const char *ipl_type = dt_prop_get_def(iplp, "cec-major-type", NULL);
+ if (ipl_type && (!strcmp(ipl_type, "hot")))
+ p->skip_perst = false;
+ }
+
+ /* By default link is assumed down */
+ p->has_link = false;
+
+ /* We register the PHB before we initialize it so we
+ * get a useful OPAL ID for it
+ */
+ pci_register_phb(&p->phb);
+
+ /* Hello ! */
+ path = dt_get_path(np);
+ PHBINF(p, "Found %s @%p\n", path, p->regs);
+ PHBINF(p, " M32 [0x%016llx..0x%016llx]\n",
+ p->mm1_base, p->mm1_base + p->mm1_size - 1);
+ PHBINF(p, " M64 [0x%016llx..0x%016llx]\n",
+ p->mm0_base, p->mm0_base + p->mm0_size - 1);
+ free(path);
+
+ /* Check if we can use the A/B detect pins */
+ p->use_ab_detect = dt_has_node_property(np, "ibm,use-ab-detect", NULL);
+
+ /* Find base location code from root node */
+ p->phb.base_loc_code = dt_prop_get_def(dt_root,
+ "ibm,io-base-loc-code", NULL);
+ if (!p->phb.base_loc_code)
+ PHBERR(p, "Base location code not found !\n");
+
+ /* Check for lane equalization values from HB or HDAT */
+ p->lane_eq = dt_prop_get_def_size(np, "ibm,lane-eq", NULL, &lane_eq_len);
+ if (p->lane_eq && lane_eq_len != (8 * 4)) {
+ PHBERR(p, "Device-tree has ibm,lane-eq with wrong len %ld\n",
+ lane_eq_len);
+ p->lane_eq = NULL;
+ }
+ if (p->lane_eq) {
+ PHBDBG(p, "Override lane equalization settings:\n");
+ PHBDBG(p, " 0x%016llx 0x%016llx\n",
+ be64_to_cpu(p->lane_eq[0]), be64_to_cpu(p->lane_eq[1]));
+ PHBDBG(p, " 0x%016llx 0x%016llx\n",
+ be64_to_cpu(p->lane_eq[2]), be64_to_cpu(p->lane_eq[3]));
+ }
+
+ /*
+ * Grab CEC IO VPD load info from the root of the device-tree,
+ * on P8 there's a single such VPD for the whole machine
+ */
+ prop = dt_find_property(dt_root, "ibm,io-vpd");
+ if (!prop) {
+ /* LX VPD Lid not already loaded */
+ vpd_iohub_load(dt_root);
+ }
+
+ /* Allocate the SkiBoot internal in-memory tables for the PHB */
+ phb3_allocate_tables(p);
+
+ phb3_add_properties(p);
+
+ /* Clear IODA2 cache */
+ phb3_init_ioda_cache(p);
+
+ /* Register interrupt sources */
+ register_irq_source(&phb3_msi_irq_ops, p, p->base_msi,
+ PHB3_MSI_IRQ_COUNT);
+ register_irq_source(&phb3_lsi_irq_ops, p, p->base_lsi, 4);
+
+#ifndef DISABLE_ERR_INTS
+ register_irq_source(&phb3_err_lsi_irq_ops, p,
+ p->base_lsi + PHB3_LSI_PCIE_INF, 2);
+#endif
+ /* Get the HW up and running */
+ phb3_init_hw(p);
+
+ /* Load capp microcode into capp unit if PHB0 */
+ if (p->index == 0)
+ capp_load_ucode(p);
+
+ /* Platform additional setup */
+ if (platform.pci_setup_phb)
+ platform.pci_setup_phb(&p->phb, p->index);
+}
+
+static void phb3_probe_pbcq(struct dt_node *pbcq)
+{
+ uint32_t spci_xscom, pci_xscom, pe_xscom, gcid, pno;
+ uint64_t val, phb_bar, bar_en;
+ uint64_t mmio0_bar, mmio0_bmask, mmio0_sz;
+ uint64_t mmio1_bar, mmio1_bmask, mmio1_sz;
+ uint64_t reg[2];
+ uint64_t mmio_win[4];
+ unsigned int mmio_win_sz;
+ struct dt_node *np;
+ char *path;
+ uint64_t capp_ucode_base;
+ unsigned int max_link_speed;
+
+ gcid = dt_get_chip_id(pbcq);
+ pno = dt_prop_get_u32(pbcq, "ibm,phb-index");
+ path = dt_get_path(pbcq);
+ printf("Chip %d Found PBCQ%d at %s\n", gcid, pno, path);
+ free(path);
+
+ pe_xscom = dt_get_address(pbcq, 0, NULL);
+ pci_xscom = dt_get_address(pbcq, 1, NULL);
+ spci_xscom = dt_get_address(pbcq, 2, NULL);
+ printf("PHB3[%d:%d]: X[PE]=0x%08x X[PCI]=0x%08x X[SPCI]=0x%08x\n",
+ gcid, pno, pe_xscom, pci_xscom, spci_xscom);
+
+ /* Check if CAPP mode */
+ if (xscom_read(gcid, spci_xscom + 0x03, &val)) {
+ prerror("PHB3[%d:%d]: Cannot read AIB CAPP ENABLE\n",
+ gcid, pno);
+ return;
+ }
+ if (val >> 63) {
+ prerror("PHB3[%d:%d]: Ignoring bridge in CAPP mode\n",
+ gcid, pno);
+ return;
+ }
+
+ /* Get PE BARs, assume only 0 and 2 are used for now */
+ xscom_read(gcid, pe_xscom + 0x42, &phb_bar);
+ phb_bar >>= 14;
+ printf("PHB3[%d:%d] REGS = 0x%016llx [4k]\n",
+ gcid, pno, phb_bar);
+ if (phb_bar == 0) {
+ prerror("PHB3[%d:%d]: No PHB BAR set !\n", gcid, pno);
+ return;
+ }
+
+ /* Dbl check PHB BAR */
+ xscom_read(gcid, spci_xscom + 1, &val);/* HW275117 */
+ xscom_read(gcid, pci_xscom + 0x0b, &val);
+ val >>= 14;
+ printf("PHB3[%d:%d] PCIBAR = 0x%016llx\n", gcid, pno, val);
+ if (phb_bar != val) {
+ prerror("PHB3[%d:%d] PCIBAR invalid, fixing up...\n",
+ gcid, pno);
+ xscom_read(gcid, spci_xscom + 1, &val);/* HW275117 */
+ xscom_write(gcid, pci_xscom + 0x0b, phb_bar << 14);
+ }
+
+ /* Check MMIO BARs */
+ xscom_read(gcid, pe_xscom + 0x40, &mmio0_bar);
+ xscom_read(gcid, pe_xscom + 0x43, &mmio0_bmask);
+ mmio0_bmask &= 0xffffffffc0000000ull;
+ mmio0_sz = ((~mmio0_bmask) >> 14) + 1;
+ mmio0_bar >>= 14;
+ printf("PHB3[%d:%d] MMIO0 = 0x%016llx [0x%016llx]\n",
+ gcid, pno, mmio0_bar, mmio0_sz);
+ xscom_read(gcid, pe_xscom + 0x41, &mmio1_bar);
+ xscom_read(gcid, pe_xscom + 0x44, &mmio1_bmask);
+ mmio1_bmask &= 0xffffffffc0000000ull;
+ mmio1_sz = ((~mmio1_bmask) >> 14) + 1;
+ mmio1_bar >>= 14;
+ printf("PHB3[%d:%d] MMIO1 = 0x%016llx [0x%016llx]\n",
+ gcid, pno, mmio1_bar, mmio1_sz);
+
+ /* Check BAR enable
+ *
+	 * XXX BARs aren't always enabled by HB, so we assume a BAR is
+	 * valid if its value is non-zero
+ */
+ xscom_read(gcid, pe_xscom + 0x45, &bar_en);
+ printf("PHB3[%d:%d] BAREN = 0x%016llx\n",
+ gcid, pno, bar_en);
+
+ /* Always enable PHB BAR */
+ bar_en |= 0x2000000000000000ull;
+
+ /* Build MMIO windows list */
+ mmio_win_sz = 0;
+ if (mmio0_bar) {
+ mmio_win[mmio_win_sz++] = mmio0_bar;
+ mmio_win[mmio_win_sz++] = mmio0_sz;
+ bar_en |= 0x8000000000000000ul;
+ }
+ if (mmio1_bar) {
+ mmio_win[mmio_win_sz++] = mmio1_bar;
+ mmio_win[mmio_win_sz++] = mmio1_sz;
+ bar_en |= 0x4000000000000000ul;
+ }
+
+ /* No MMIO windows ? Barf ! */
+ if (mmio_win_sz == 0) {
+ prerror("PHB3[%d:%d]: No MMIO windows enabled !\n",
+ gcid, pno);
+ return;
+ }
+
+ /* Set the interrupt routing stuff, 8 relevant bits in mask
+ * (11 bits per PHB)
+ */
+ val = P8_CHIP_IRQ_PHB_BASE(gcid, pno);
+ val = (val << 45);
+ xscom_write(gcid, pe_xscom + 0x1a, val);
+ xscom_write(gcid, pe_xscom + 0x1b, 0xff00000000000000ul);
+
+ /* Configure LSI location to the top of the map */
+ xscom_write(gcid, pe_xscom + 0x1f, 0xff00000000000000ul);
+
+ /* Now add IRSN message bits to BAR enable and write it */
+ bar_en |= 0x1800000000000000ul;
+ xscom_write(gcid, pe_xscom + 0x45, bar_en);
+
+ printf("PHB3[%d:%d] NEWBAREN = 0x%016llx\n",
+ gcid, pno, bar_en);
+
+ xscom_read(gcid, pe_xscom + 0x1a, &val);
+ printf("PHB3[%d:%d] IRSNC = 0x%016llx\n",
+ gcid, pno, val);
+ xscom_read(gcid, pe_xscom + 0x1b, &val);
+ printf("PHB3[%d:%d] IRSNM = 0x%016llx\n",
+ gcid, pno, val);
+ printf("PHB3[%d:%d] LSI = 0x%016llx\n",
+ gcid, pno, val);
+
+ /* Create PHB node */
+ reg[0] = phb_bar;
+ reg[1] = 0x1000;
+
+ np = dt_new_addr(dt_root, "pciex", reg[0]);
+ if (!np)
+ return;
+
+ dt_add_property_strings(np, "compatible", "ibm,power8-pciex",
+ "ibm,ioda2-phb");
+ dt_add_property_strings(np, "device_type", "pciex");
+ dt_add_property(np, "reg", reg, sizeof(reg));
+
+ /* Everything else is handled later by skiboot, we just
+ * stick a few hints here
+ */
+ dt_add_property_cells(np, "ibm,xscom-bases",
+ pe_xscom, spci_xscom, pci_xscom);
+ dt_add_property(np, "ibm,mmio-window", mmio_win, 8 * mmio_win_sz);
+ dt_add_property_cells(np, "ibm,phb-index", pno);
+ dt_add_property_cells(np, "ibm,pbcq", pbcq->phandle);
+ dt_add_property_cells(np, "ibm,chip-id", gcid);
+ if (dt_has_node_property(pbcq, "ibm,use-ab-detect", NULL))
+ dt_add_property(np, "ibm,use-ab-detect", NULL, 0);
+ if (dt_has_node_property(pbcq, "ibm,hub-id", NULL))
+ dt_add_property_cells(np, "ibm,hub-id",
+ dt_prop_get_u32(pbcq, "ibm,hub-id"));
+ if (dt_has_node_property(pbcq, "ibm,loc-code", NULL)) {
+ const char *lc = dt_prop_get(pbcq, "ibm,loc-code");
+ dt_add_property_string(np, "ibm,loc-code", lc);
+ }
+ if (dt_has_node_property(pbcq, "ibm,lane-eq", NULL)) {
+ size_t leq_size;
+ const void *leq = dt_prop_get_def_size(pbcq, "ibm,lane-eq",
+ NULL, &leq_size);
+ if (leq != NULL && leq_size == 4 * 8)
+ dt_add_property(np, "ibm,lane-eq", leq, leq_size);
+ }
+ if (dt_has_node_property(pbcq, "ibm,capp-ucode", NULL)) {
+ capp_ucode_base = dt_prop_get_u32(pbcq, "ibm,capp-ucode");
+ dt_add_property_cells(np, "ibm,capp-ucode", capp_ucode_base);
+ }
+ max_link_speed = dt_prop_get_u32_def(pbcq, "ibm,max-link-speed", 3);
+ dt_add_property_cells(np, "ibm,max-link-speed", max_link_speed);
+
+ add_chip_dev_associativity(np);
+}
+
+void probe_phb3(void)
+{
+ struct dt_node *np;
+
+ /* Look for PBCQ XSCOM nodes */
+ dt_for_each_compatible(dt_root, np, "ibm,power8-pbcq")
+ phb3_probe_pbcq(np);
+
+ /* Look for newly created PHB nodes */
+ dt_for_each_compatible(dt_root, np, "ibm,power8-pciex")
+ phb3_create(np);
+}
diff --git a/hw/psi.c b/hw/psi.c
new file mode 100644
index 0000000..5cbae34
--- /dev/null
+++ b/hw/psi.c
@@ -0,0 +1,873 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Processor Service Interface (PSI) link handling code
+ */
+#include <io.h>
+#include <psi.h>
+#include <fsp.h>
+#include <opal.h>
+#include <gx.h>
+#include <interrupts.h>
+#include <cpu.h>
+#include <trace.h>
+#include <xscom.h>
+#include <chip.h>
+#include <timebase.h>
+#include <platform.h>
+
+//#define DBG(fmt...) printf(fmt)
+#define DBG(fmt...) do { } while(0)
+//#define FSP_TRACE
+
+static LIST_HEAD(psis);
+static u64 psi_link_timer;
+static u64 psi_link_timeout;
+bool psi_link_poll_active;
+static bool psi_ext_irq_policy = EXTERNAL_IRQ_POLICY_LINUX;
+
+static void psi_register_interrupts(struct psi *psi);
+static void psi_activate_phb(struct psi *psi);
+
+static struct lock psi_lock = LOCK_UNLOCKED;
+
+void psi_set_link_polling(bool active)
+{
+ printf("PSI: %sing link polling\n",
+ active ? "start" : "stopp");
+ psi_link_poll_active = active;
+}
+
+void psi_disable_link(struct psi *psi)
+{
+ u64 val;
+
+ lock(&psi_lock);
+
+ /*
+ * Note: This can be called with the link already down but
+ * not detected as such yet by this layer since psi_check_link_active()
+ * operates locklessly and thus won't update the PSI structure. This
+ * is a non-issue, the only consequence is the messages in the log
+ * mentioning first the link having gone down then being disabled.
+ */
+ if (psi->active) {
+ psi->active = false;
+
+ printf("PSI[0x%03x]: Disabling link!\n", psi->chip_id);
+
+ /* Clear the link enable bit and disable FSP interrupts */
+ val = in_be64(psi->regs + PSIHB_CR);
+ val &= ~PSIHB_CR_PSI_LINK_ENABLE;
+ val &= ~PSIHB_CR_FSP_IRQ_ENABLE;
+ val &= ~PSIHB_CR_FSP_IRQ; /* Clear interrupt state too */
+ out_be64(psi->regs + PSIHB_CR, val);
+ }
+
+ unlock(&psi_lock);
+}
+
+bool psi_check_link_active(struct psi *psi)
+{
+ u64 val = in_be64(psi->regs + PSIHB_CR);
+
+ /*
+ * Unlocked, used during fsp_poke_msg so we really want
+ * to avoid fancy link re-entrancy and deadlocks here
+ */
+ if (!psi->active)
+ return false;
+ return (val & PSIHB_CR_PSI_LINK_ENABLE) &&
+ (val & PSIHB_CR_FSP_LINK_ACTIVE);
+}
+
+struct psi *psi_find_link(uint32_t chip_id)
+{
+ struct psi *psi;
+
+ list_for_each(&psis, psi, list) {
+ if (psi->chip_id == chip_id)
+ return psi;
+ }
+ return NULL;
+}
+
+#define PSI_LINK_CHECK_INTERVAL 10 /* Interval in secs */
+#define PSI_LINK_RECOVERY_TIMEOUT 900 /* 15 minutes */
+
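+/*
+ * Poller checking for PSI links coming (back) up. Rate limited by
+ * psi_link_timer and bounded by psi_link_timeout.
+ */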
+static void psi_link_poll(void *data __unused)
+{
+ struct psi *psi;
+ u64 now;
+
+ if (!psi_link_poll_active)
+ return;
+
+ now = mftb();
+ if (psi_link_timer == 0 ||
+ (tb_compare(now, psi_link_timer) == TB_AAFTERB) ||
+ (tb_compare(now, psi_link_timer) == TB_AEQUALB)) {
+
+ list_for_each(&psis, psi, list) {
+ u64 val;
+
+ if (psi->active || !psi->working)
+ continue;
+
+ lock(&psi_lock);
+ if (psi->active || !psi->working) {
+ unlock(&psi_lock);
+ continue;
+ }
+
+ val = in_be64(psi->regs + PSIHB_CR);
+
+ printf("PSI[0x%03x]: Poll CR=0x%016llx\n",
+ psi->chip_id, val);
+
+ if ((val & PSIHB_CR_PSI_LINK_ENABLE) &&
+ (val & PSIHB_CR_FSP_LINK_ACTIVE)) {
+ printf("PSI[0x%03x]: Found active link!\n",
+ psi->chip_id);
+ psi_link_timeout = 0;
+ psi->active = true;
+ psi_activate_phb(psi);
+ unlock(&psi_lock);
+ fsp_reinit_fsp();
+ return;
+ }
+ unlock(&psi_lock);
+ }
+
+ if (!psi_link_timeout)
+ psi_link_timeout =
+ now + secs_to_tb(PSI_LINK_RECOVERY_TIMEOUT);
+
+ if (tb_compare(now, psi_link_timeout) == TB_AAFTERB) {
+ prerror("PSI: Timed out looking for a PSI link\n");
+
+ /* Log error to the host from here */
+ }
+
+ /* Poll every 10 seconds */
+ psi_link_timer = now + secs_to_tb(PSI_LINK_CHECK_INTERVAL);
+ }
+}
+
+void psi_enable_fsp_interrupt(struct psi *psi)
+{
+ if (!psi->working)
+ return;
+
+ /* Enable FSP interrupts in the GXHB */
+ lock(&psi_lock);
+ out_be64(psi->regs + PSIHB_CR,
+ in_be64(psi->regs + PSIHB_CR) | PSIHB_CR_FSP_IRQ_ENABLE);
+ unlock(&psi_lock);
+}
+
+/* Multiple bits can be set on errors */
+static void decode_psihb_error(u64 val)
+{
+ if (val & PSIHB_CR_PSI_ERROR)
+ printf("PSI: PSI Reported Error\n");
+ if (val & PSIHB_CR_PSI_LINK_INACTIVE)
+ printf("PSI: PSI Link Inactive Transition\n");
+ if (val & PSIHB_CR_FSP_ACK_TIMEOUT)
+ printf("PSI: FSP Ack Timeout\n");
+ if (val & PSIHB_CR_MMIO_LOAD_TIMEOUT)
+ printf("PSI: MMIO Load Timeout\n");
+ if (val & PSIHB_CR_MMIO_LENGTH_ERROR)
+ printf("PSI: MMIO Length Error\n");
+ if (val & PSIHB_CR_MMIO_ADDRESS_ERROR)
+ printf("PSI: MMIO Address Error\n");
+ if (val & PSIHB_CR_MMIO_TYPE_ERROR)
+ printf("PSI: MMIO Type Error\n");
+ if (val & PSIHB_CR_UE)
+ printf("PSI: UE Detected\n");
+ if (val & PSIHB_CR_PARITY_ERROR)
+ printf("PSI: Internal Parity Error\n");
+ if (val & PSIHB_CR_SYNC_ERR_ALERT1)
+ printf("PSI: Sync Error Alert1\n");
+ if (val & PSIHB_CR_SYNC_ERR_ALERT2)
+ printf("PSI: Sync Error Alert2\n");
+ if (val & PSIHB_CR_FSP_COMMAND_ERROR)
+ printf("PSI: FSP Command Error\n");
+}
+
+static void handle_psi_interrupt(struct psi *psi, u64 val)
+{
+ u64 reg;
+
+	printf("PSI[0x%03x]: PSI mgmt interrupt CR=0x%016llx\n",
+ psi->chip_id, val);
+
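+	/* The 0xfff mask below covers the PSIHB_CR error status bits
+	 * decoded by decode_psihb_error()
+	 */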
+ if (val & (0xfffull << 20)) {
+ lock(&psi_lock);
+ psi->active = false;
+
+ decode_psihb_error(val);
+
+ /* Mask errors in SEMR */
+ reg = in_be64(psi->regs + PSIHB_SEMR);
+ reg = ((0xfffull << 36) | (0xfffull << 20));
+ out_be64(psi->regs + PSIHB_SEMR, reg);
+ printf("PSI: SEMR set to %llx\n", reg);
+
+ /* Reset all the error bits in PSIHB_CR and
+ * disable FSP interrupts
+ */
+ val = in_be64(psi->regs + PSIHB_CR);
+ val &= ~(0x7ffull << 20);
+ val &= ~PSIHB_CR_PSI_LINK_ENABLE; /* flip link enable */
+ /*
+ * Ensure no commands/spurious interrupts reach
+ * the processor, by flipping the command enable.
+ */
+ val &= ~PSIHB_CR_FSP_CMD_ENABLE;
+ val &= ~PSIHB_CR_FSP_IRQ_ENABLE;
+ val &= ~PSIHB_CR_FSP_IRQ; /* Clear interrupt state too */
+ out_be64(psi->regs + PSIHB_CR, val);
+ printf("PSI: PSIHB_CR (error bits) set to %llx\n",
+ in_be64(psi->regs + PSIHB_CR));
+ unlock(&psi_lock);
+ } else if (val & (0x1full << 11))
+ printf("PSI: FSP error detected\n");
+}
+
+/* TODO: Determine which of these needs to be handled by powernv */
+static void handle_extra_interrupt(struct psi *psi)
+{
+ u64 val;
+
+ val = in_be64(psi->regs + PSIHB_IRQ_STATUS);
+
+ /*
+ * Decode interrupt type, call appropriate handlers
+ * when available.
+ */
+ if (val & PSIHB_IRQ_STAT_OCC)
+ printf("PSI: OCC irq received\n");
+ if (val & PSIHB_IRQ_STAT_FSI)
+ printf("PSI: FSI irq received\n");
+ if (val & PSIHB_IRQ_STAT_LPC)
+ printf("PSI: LPC/I2C irq received\n");
+ if (val & PSIHB_IRQ_STAT_LOCAL_ERR)
+ printf("PSI: ATTN irq received\n");
+ if (val & PSIHB_IRQ_STAT_HOST_ERR) {
+ if (platform.external_irq)
+ platform.external_irq(psi->chip_id);
+ }
+
+ /*
+ * TODO: Per Vicente Chung, CRESPs don't generate interrupts,
+ * and are just informational. Need to define the policy
+ * to handle them.
+ */
+}
+
+static void psi_spurious_fsp_irq(struct psi *psi)
+{
+ u64 reg, bit;
+
+ prerror("PSI: Spurious interrupt, attempting clear\n");
+
+ if (proc_gen == proc_gen_p8) {
+ reg = PSIHB_XSCOM_P8_HBCSR_CLR;
+ bit = PSIHB_XSCOM_P8_HBSCR_FSP_IRQ;
+ } else {
+ reg = PSIHB_XSCOM_P7_HBCSR_CLR;
+ bit = PSIHB_XSCOM_P7_HBSCR_FSP_IRQ;
+ }
+ xscom_write(psi->chip_id, psi->xscom_base + reg, bit);
+}
+
+bool psi_poll_fsp_interrupt(struct psi *psi)
+{
+ return !!(in_be64(psi->regs + PSIHB_CR) & PSIHB_CR_FSP_IRQ);
+}
+
+static void psi_interrupt(void *data, uint32_t isn __unused)
+{
+ struct psi *psi = data;
+ u64 val;
+
+ val = in_be64(psi->regs + PSIHB_CR);
+
+ if (psi_link_poll_active) {
+ printf("PSI[0x%03x]: PSI interrupt CR=0x%016llx (A=%d)\n",
+ psi->chip_id, val, psi->active);
+ }
+
+ /* Handle PSI interrupts first in case it's a link down */
+ if (val & PSIHB_CR_PSI_IRQ) {
+ handle_psi_interrupt(psi, val);
+
+ /*
+ * If the link went down, re-read PSIHB_CR as
+ * the FSP interrupt might have been cleared.
+ */
+ if (!psi->active)
+ val = in_be64(psi->regs + PSIHB_CR);
+ }
+
+ /*
+ * We avoid forwarding FSP interrupts if the link isn't
+ * active. They should be masked anyway but it looks
+ * like the CR bit can remain set.
+ */
+ if (val & PSIHB_CR_FSP_IRQ) {
+		/*
+		 * We have seen cases of a flood of FSP mailbox interrupts
+		 * when the link is down; see if we manage to clear the
+		 * condition
+		 */
+ if (!psi->active)
+ psi_spurious_fsp_irq(psi);
+ else
+ fsp_interrupt();
+ }
+
+ /* P8 additional interrupt? */
+ if (proc_gen == proc_gen_p8)
+ handle_extra_interrupt(psi);
+
+ /* Poll the console buffers on any interrupt since we don't
+ * get send notifications
+ */
+ fsp_console_poll(NULL);
+}
+
+static int64_t psi_p7_set_xive(void *data, uint32_t isn __unused,
+ uint16_t server, uint8_t priority)
+{
+ struct psi *psi = data;
+ uint64_t xivr;
+
+ if (!psi->working)
+ return OPAL_HARDWARE;
+
+ /* Populate the XIVR */
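+	/* Server lands at bit 40, priority at bit 32, BUID at bit 16 */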
+ xivr = (uint64_t)server << 40;
+ xivr |= (uint64_t)priority << 32;
+ xivr |= P7_IRQ_BUID(psi->interrupt) << 16;
+
+ out_be64(psi->regs + PSIHB_XIVR, xivr);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t psi_p7_get_xive(void *data, uint32_t isn __unused,
+ uint16_t *server, uint8_t *priority)
+{
+ struct psi *psi = data;
+ uint64_t xivr;
+
+ if (!psi->working)
+ return OPAL_HARDWARE;
+
+ /* Read & decode the XIVR */
+ xivr = in_be64(psi->regs + PSIHB_XIVR);
+
+ *server = (xivr >> 40) & 0x7ff;
+ *priority = (xivr >> 32) & 0xff;
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t psi_p8_set_xive(void *data, uint32_t isn,
+ uint16_t server, uint8_t priority)
+{
+ struct psi *psi = data;
+ uint64_t xivr_p, xivr;
+
+ switch(isn & 7) {
+ case P8_IRQ_PSI_FSP:
+ xivr_p = PSIHB_XIVR_FSP;
+ break;
+ case P8_IRQ_PSI_OCC:
+ xivr_p = PSIHB_XIVR_OCC;
+ break;
+ case P8_IRQ_PSI_FSI:
+ xivr_p = PSIHB_XIVR_FSI;
+ break;
+ case P8_IRQ_PSI_LPC:
+ xivr_p = PSIHB_XIVR_LPC;
+ break;
+ case P8_IRQ_PSI_LOCAL_ERR:
+ xivr_p = PSIHB_XIVR_LOCAL_ERR;
+ break;
+ case P8_IRQ_PSI_HOST_ERR:
+ xivr_p = PSIHB_XIVR_HOST_ERR;
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
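+	/*
+	 * Each P8 PSI source has its own XIVR; the low 3 bits of the
+	 * ISN select it and are also encoded at bit 29 of the XIVR.
+	 */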
+ /* Populate the XIVR */
+ xivr = (uint64_t)server << 40;
+ xivr |= (uint64_t)priority << 32;
+ xivr |= (uint64_t)(isn & 7) << 29;
+
+ out_be64(psi->regs + xivr_p, xivr);
+
+ return OPAL_SUCCESS;
+}
+
+static int64_t psi_p8_get_xive(void *data, uint32_t isn __unused,
+ uint16_t *server, uint8_t *priority)
+{
+ struct psi *psi = data;
+ uint64_t xivr_p, xivr;
+
+ switch(isn & 7) {
+ case P8_IRQ_PSI_FSP:
+ xivr_p = PSIHB_XIVR_FSP;
+ break;
+ case P8_IRQ_PSI_OCC:
+ xivr_p = PSIHB_XIVR_OCC;
+ break;
+ case P8_IRQ_PSI_FSI:
+ xivr_p = PSIHB_XIVR_FSI;
+ break;
+ case P8_IRQ_PSI_LPC:
+ xivr_p = PSIHB_XIVR_LPC;
+ break;
+ case P8_IRQ_PSI_LOCAL_ERR:
+ xivr_p = PSIHB_XIVR_LOCAL_ERR;
+ break;
+ case P8_IRQ_PSI_HOST_ERR:
+ xivr_p = PSIHB_XIVR_HOST_ERR;
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ /* Read & decode the XIVR */
+ xivr = in_be64(psi->regs + xivr_p);
+
+ *server = (xivr >> 40) & 0xffff;
+ *priority = (xivr >> 32) & 0xff;
+
+ return OPAL_SUCCESS;
+}
+
+/* Called on a fast reset, make sure we aren't stuck with
+ * an accepted and never EOI'd PSI interrupt
+ */
+void psi_irq_reset(void)
+{
+ struct psi *psi;
+ uint64_t xivr;
+
+ printf("PSI: Hot reset!\n");
+
+ assert(proc_gen == proc_gen_p7);
+
+ list_for_each(&psis, psi, list) {
+ /* Mask the interrupt & clean the XIVR */
+ xivr = 0x000000ff00000000;
+ xivr |= P7_IRQ_BUID(psi->interrupt) << 16;
+ out_be64(psi->regs + PSIHB_XIVR, xivr);
+
+#if 0 /* Seems to checkstop ... */
+		/*
+		 * Maybe not anymore; we were blindly sending this on all
+		 * I/O paths, not just the active one, and we don't even
+		 * know if those PSIs are correct.
+		 */
+ /* Send a dummy EOI to make sure the ICP is clear */
+ icp_send_eoi(psi->interrupt);
+#endif
+ }
+}
+
+static const struct irq_source_ops psi_p7_irq_ops = {
+ .get_xive = psi_p7_get_xive,
+ .set_xive = psi_p7_set_xive,
+ .interrupt = psi_interrupt,
+};
+
+static const struct irq_source_ops psi_p8_irq_ops = {
+ .get_xive = psi_p8_get_xive,
+ .set_xive = psi_p8_set_xive,
+ .interrupt = psi_interrupt,
+};
+
+static const struct irq_source_ops psi_p8_host_err_ops = {
+ .get_xive = psi_p8_get_xive,
+ .set_xive = psi_p8_set_xive,
+};
+
+static void psi_tce_enable(struct psi *psi, bool enable)
+{
+ void *addr;
+ u64 val;
+
+ switch (proc_gen) {
+ case proc_gen_p7:
+ addr = psi->regs + PSIHB_CR;
+ break;
+ case proc_gen_p8:
+ addr = psi->regs + PSIHB_PHBSCR;
+ break;
+ default:
+ prerror("%s: Unknown CPU type\n", __func__);
+ return;
+ }
+
+ val = in_be64(addr);
+ if (enable)
+ val |= PSIHB_CR_TCE_ENABLE;
+ else
+ val &= ~PSIHB_CR_TCE_ENABLE;
+ out_be64(addr, val);
+}
+
+/*
+ * Configure the PSI interface for communicating with
+ * an FSP, such as enabling the TCEs, FSP commands,
+ * etc...
+ */
+void psi_init_for_fsp(struct psi *psi)
+{
+ uint64_t reg;
+ bool enable_tce = true;
+
+ lock(&psi_lock);
+
+ /* Disable and setup TCE base address */
+ psi_tce_enable(psi, false);
+
+ switch (proc_gen) {
+ case proc_gen_p7:
+ out_be64(psi->regs + PSIHB_TAR, PSI_TCE_TABLE_BASE |
+ PSIHB_TAR_16K_ENTRIES);
+ break;
+ case proc_gen_p8:
+ out_be64(psi->regs + PSIHB_TAR, PSI_TCE_TABLE_BASE |
+ PSIHB_TAR_256K_ENTRIES);
+ break;
+ default:
+ enable_tce = false;
+ };
+
+ /* Enable various other configuration register bits based
+ * on what pHyp does. We keep interrupts disabled until
+ * after the mailbox has been properly configured. We assume
+ * basic stuff such as PSI link enable is already there.
+ *
+ * - FSP CMD Enable
+ * - FSP MMIO Enable
+ * - TCE Enable
+ * - Error response enable
+ *
+ * Clear all other error bits
+ */
+ if (!psi->active) {
+ prerror("PSI: psi_init_for_fsp() called on inactive link!\n");
+ unlock(&psi_lock);
+ return;
+ }
+
+ reg = in_be64(psi->regs + PSIHB_CR);
+ reg |= PSIHB_CR_FSP_CMD_ENABLE;
+ reg |= PSIHB_CR_FSP_MMIO_ENABLE;
+ reg |= PSIHB_CR_FSP_ERR_RSP_ENABLE;
+ reg &= ~0x00000000ffffffffull;
+ out_be64(psi->regs + PSIHB_CR, reg);
+ psi_tce_enable(psi, enable_tce);
+
+ unlock(&psi_lock);
+}
+
+void psi_set_external_irq_policy(bool policy)
+{
+ psi_ext_irq_policy = policy;
+}
+
+/*
+ * Register interrupt sources for all working links, not just the active ones.
+ * This is a one-time activity.
+ */
+static void psi_register_interrupts(struct psi *psi)
+{
+ /* Configure the interrupt BUID and mask it */
+ switch (proc_gen) {
+ case proc_gen_p7:
+ /* On P7, we get a single interrupt */
+ out_be64(psi->regs + PSIHB_XIVR,
+ P7_IRQ_BUID(psi->interrupt) << 16 |
+ 0xffull << 32);
+
+ /* Configure it in the GX controller as well */
+ gx_configure_psi_buid(psi->chip_id,
+ P7_IRQ_BUID(psi->interrupt));
+
+ /* Register the IRQ source */
+ register_irq_source(&psi_p7_irq_ops,
+ psi, psi->interrupt, 1);
+ break;
+ case proc_gen_p8:
+ /* On P8 we get a block of 8, set up the base/mask
+ * and mask all the sources for now
+ */
+ out_be64(psi->regs + PSIHB_ISRN,
+ SETFIELD(PSIHB_ISRN_COMP, 0ul, psi->interrupt) |
+ SETFIELD(PSIHB_ISRN_MASK, 0ul, 0x7fff8ul) |
+ PSIHB_ISRN_DOWNSTREAM_EN |
+ PSIHB_ISRN_UPSTREAM_EN);
+ out_be64(psi->regs + PSIHB_XIVR_FSP,
+ (0xffull << 32) | (P8_IRQ_PSI_FSP << 29));
+ out_be64(psi->regs + PSIHB_XIVR_OCC,
+ (0xffull << 32) | (P8_IRQ_PSI_OCC << 29));
+ out_be64(psi->regs + PSIHB_XIVR_FSI,
+ (0xffull << 32) | (P8_IRQ_PSI_FSI << 29));
+ out_be64(psi->regs + PSIHB_XIVR_LPC,
+ (0xffull << 32) | (P8_IRQ_PSI_LPC << 29));
+ out_be64(psi->regs + PSIHB_XIVR_LOCAL_ERR,
+ (0xffull << 32) | (P8_IRQ_PSI_LOCAL_ERR << 29));
+ out_be64(psi->regs + PSIHB_XIVR_HOST_ERR,
+ (0xffull << 32) | (P8_IRQ_PSI_HOST_ERR << 29));
+
+ /*
+ * Register the IRQ sources FSP, OCC, FSI, LPC
+ * and Local Error. Host Error is actually the
+ * external interrupt and the policy for that comes
+ * from the platform
+ */
+ if (psi_ext_irq_policy == EXTERNAL_IRQ_POLICY_SKIBOOT) {
+ register_irq_source(&psi_p8_irq_ops,
+ psi,
+ psi->interrupt + P8_IRQ_PSI_SKIBOOT_BASE,
+ P8_IRQ_PSI_ALL_COUNT);
+ } else {
+ register_irq_source(&psi_p8_irq_ops,
+ psi,
+ psi->interrupt + P8_IRQ_PSI_SKIBOOT_BASE,
+ P8_IRQ_PSI_LOCAL_COUNT);
+ /*
+ * Host Error is handled by powernv; host error
+ * is at offset 5 from the PSI base.
+ */
+ register_irq_source(&psi_p8_host_err_ops,
+ psi,
+ psi->interrupt + P8_IRQ_PSI_LINUX_BASE,
+ P8_IRQ_PSI_LINUX_COUNT);
+ }
+ break;
+ default:
+ /* Unknown: just no interrupts */
+ prerror("PSI: Unknown interrupt type\n");
+ }
+}
+
+static void psi_activate_phb(struct psi *psi)
+{
+ u64 reg;
+
+ /*
+ * Disable interrupt emission in the control register,
+	 * it will be re-enabled later, once the mailbox has been
+	 * enabled.
+ */
+ reg = in_be64(psi->regs + PSIHB_CR);
+ reg &= ~PSIHB_CR_FSP_IRQ_ENABLE;
+ out_be64(psi->regs + PSIHB_CR, reg);
+
+ /* Enable interrupts in the mask register. We enable everything
+ * except for bit "FSP command error detected" which the doc
+ * (P7 BookIV) says should be masked for normal ops. It also
+ * seems to be masked under OPAL.
+ */
+ reg = 0x0000010000100000ull;
+ out_be64(psi->regs + PSIHB_SEMR, reg);
+
+#if 0
+ /* Dump the GXHB registers */
+ printf(" PSIHB_BBAR : %llx\n",
+ in_be64(psi->regs + PSIHB_BBAR));
+ printf(" PSIHB_FSPBAR : %llx\n",
+ in_be64(psi->regs + PSIHB_FSPBAR));
+ printf(" PSIHB_FSPMMR : %llx\n",
+ in_be64(psi->regs + PSIHB_FSPMMR));
+ printf(" PSIHB_TAR : %llx\n",
+ in_be64(psi->regs + PSIHB_TAR));
+ printf(" PSIHB_CR : %llx\n",
+ in_be64(psi->regs + PSIHB_CR));
+ printf(" PSIHB_SEMR : %llx\n",
+ in_be64(psi->regs + PSIHB_SEMR));
+ printf(" PSIHB_XIVR : %llx\n",
+ in_be64(psi->regs + PSIHB_XIVR));
+#endif
+}
+
+static void psi_create_mm_dtnode(struct psi *psi)
+{
+ struct dt_node *np;
+ uint64_t addr = (uint64_t)psi->regs;
+
+ np = dt_new_addr(dt_root, "psi", addr);
+ if (!np)
+ return;
+
+ /* Hard wire size to 4G */
+ dt_add_property_cells(np, "reg", hi32(addr), lo32(addr), 1, 0);
+ switch (proc_gen) {
+ case proc_gen_p7:
+ dt_add_property_strings(np, "compatible", "ibm,psi",
+ "ibm,power7-psi");
+ break;
+ case proc_gen_p8:
+ dt_add_property_strings(np, "compatible", "ibm,psi",
+ "ibm,power8-psi");
+ break;
+ default:
+ dt_add_property_strings(np, "compatible", "ibm,psi");
+ }
+ dt_add_property_cells(np, "interrupt-parent", get_ics_phandle());
+ dt_add_property_cells(np, "interrupts", psi->interrupt);
+ dt_add_property_cells(np, "ibm,chip-id", psi->chip_id);
+}
+
+static struct psi *alloc_psi(uint64_t base)
+{
+ struct psi *psi;
+
+ psi = zalloc(sizeof(struct psi));
+ if (!psi) {
+ prerror("PSI: Could not allocate memory\n");
+ return NULL;
+ }
+ psi->xscom_base = base;
+ return psi;
+}
+
+static struct psi *psi_probe_p7(struct proc_chip *chip, u64 base)
+{
+ struct psi *psi = NULL;
+ uint64_t rc, val;
+
+ rc = xscom_read(chip->id, base + PSIHB_XSCOM_P7_HBBAR, &val);
+ if (rc) {
+ prerror("PSI: Error %llx reading PSIHB BAR on chip %d\n",
+ rc, chip->id);
+ return NULL;
+ }
+ if (val & PSIHB_XSCOM_P7_HBBAR_EN) {
+ psi = alloc_psi(base);
+ if (!psi)
+ return NULL;
+ psi->working = true;
+ rc = val >> 36; /* Bits 0:1 = 0x00; 2:27 Bridge BAR... */
+ rc <<= 20; /* ... corresponds to bits 18:43 of base addr */
+ psi->regs = (void *)rc;
+ } else
+ printf("PSI[0x%03x]: Working link not found\n", chip->id);
+
+ return psi;
+}
+
+static struct psi *psi_probe_p8(struct proc_chip *chip, u64 base)
+{
+ struct psi *psi = NULL;
+ uint64_t rc, val;
+
+ rc = xscom_read(chip->id, base + PSIHB_XSCOM_P8_BASE, &val);
+ if (rc) {
+ prerror("PSI[0x%03x]: Error %llx reading PSIHB BAR\n",
+ chip->id, rc);
+ return NULL;
+ }
+ if (val & PSIHB_XSCOM_P8_HBBAR_EN) {
+ psi = alloc_psi(base);
+ if (!psi)
+ return NULL;
+ psi->working = true;
+ psi->regs = (void *)(val & ~PSIHB_XSCOM_P8_HBBAR_EN);
+ } else
+ printf("PSI[0x%03x]: Working link not found\n", chip->id);
+
+ return psi;
+}
+
+static bool psi_init_psihb(struct dt_node *psihb)
+{
+ uint32_t chip_id = dt_get_chip_id(psihb);
+ struct proc_chip *chip = get_chip(chip_id);
+ struct psi *psi = NULL;
+ u64 base, val;
+
+ if (!chip) {
+ prerror("PSI: Can't find chip!\n");
+ return false;
+ }
+
+ base = dt_get_address(psihb, 0, NULL);
+
+ if (dt_node_is_compatible(psihb, "ibm,power7-psihb-x"))
+ psi = psi_probe_p7(chip, base);
+ else if (dt_node_is_compatible(psihb, "ibm,power8-psihb-x"))
+ psi = psi_probe_p8(chip, base);
+ else {
+ prerror("PSI: Unknown processor type\n");
+ return false;
+ }
+ if (!psi)
+ return false;
+
+ list_add(&psis, &psi->list);
+
+ val = in_be64(psi->regs + PSIHB_CR);
+ if (val & PSIHB_CR_FSP_LINK_ACTIVE) {
+ lock(&psi_lock);
+ psi->active = true;
+ unlock(&psi_lock);
+ }
+
+ psi->chip_id = chip->id;
+ psi->interrupt = get_psi_interrupt(chip->id);
+
+ psi_create_mm_dtnode(psi);
+ psi_register_interrupts(psi);
+ psi_activate_phb(psi);
+
+ printf("PSI[0x%03x]: Found PSI bridge [working=%d, active=%d]\n",
+ psi->chip_id, psi->working, psi->active);
+ return true;
+}
+
+void psi_fsp_link_in_use(struct psi *psi __unused)
+{
+ static bool poller_created = false;
+
+ /* Do this once only */
+ if (!poller_created) {
+ poller_created = true;
+ opal_add_poller(psi_link_poll, NULL);
+ }
+}
+
+void psi_init(void)
+{
+ struct dt_node *np;
+
+ dt_for_each_compatible(dt_root, np, "ibm,psihb-x")
+ psi_init_psihb(np);
+}
diff --git a/hw/sfc-ctrl.c b/hw/sfc-ctrl.c
new file mode 100644
index 0000000..de163c5
--- /dev/null
+++ b/hw/sfc-ctrl.c
@@ -0,0 +1,523 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <lpc.h>
+#include <sfc-ctrl.h>
+
+#include <libflash/libflash.h>
+#include <libflash/libflash-priv.h>
+
+/* Offset of SFC registers in FW space */
+#define SFC_CMDREG_OFFSET 0x00000c00
+/* Offset of SFC command buffer in FW space */
+#define SFC_CMDBUF_OFFSET 0x00000d00
+/* Offset of flash MMIO mapping in FW space */
+#define SFC_MMIO_OFFSET 0x0c000000
+
+
+/*
+ * Register definitions
+ */
+#define SFC_REG_CONF 0x10 /* CONF: Direct Access Configuration */
+#define SFC_REG_CONF_FRZE (1 << 3)
+#define SFC_REG_CONF_ECCEN (1 << 2)
+#define SFC_REG_CONF_DRCD (1 << 1)
+#define SFC_REG_CONF_FLRLD (1 << 0)
+
+#define SFC_REG_STATUS 0x0C /* STATUS : Status Reg */
+#define SFC_REG_STATUS_NX_ON_SHFT 28
+#define SFC_REG_STATUS_RWP (1 << 27)
+#define SFC_REG_STATUS_FOURBYTEAD (1 << 26)
+#define SFC_REG_STATUS_ILLEGAL (1 << 4)
+#define SFC_REG_STATUS_ECCERRCNTN (1 << 3)
+#define SFC_REG_STATUS_ECCUEN (1 << 2)
+#define SFC_REG_STATUS_DONE (1 << 0)
+
+#define SFC_REG_CMD 0x40 /* CMD : Command */
+#define SFC_REG_CMD_OPCODE_SHFT 9
+#define SFC_REG_CMD_LENGTH_SHFT 0
+
+#define SFC_REG_SPICLK 0x3C /* SPICLK: SPI clock rate config */
+#define SFC_REG_SPICLK_OUTDLY_SHFT 24
+#define SFC_REG_SPICLK_INSAMPDLY_SHFT 16
+#define SFC_REG_SPICLK_CLKHI_SHFT 8
+#define SFC_REG_SPICLK_CLKLO_SHFT 0
+
+#define SFC_REG_ADR 0x44 /* ADR : Address */
+#define SFC_REG_ERASMS 0x48 /* ERASMS : Small Erase Block Size */
+#define SFC_REG_ERASLGS 0x4C /* ERASLGS : Large Erase Block Size */
+#define SFC_REG_CONF4 0x54 /* CONF4 : SPI Op Code for Small Erase */
+#define SFC_REG_CONF5 0x58 /* CONF5 : Small Erase Size config reg */
+
+#define SFC_REG_CONF8 0x64 /* CONF8 : Read Command */
+#define SFC_REG_CONF8_CSINACTIVERD_SHFT 18
+#define SFC_REG_CONF8_DUMMY_SHFT 8
+#define SFC_REG_CONF8_READOP_SHFT 0
+
+#define SFC_REG_ADRCBF 0x80 /* ADRCBF : First Intf NOR Addr Offset */
+#define SFC_REG_ADRCMF 0x84 /* ADRCMF : First Intf NOR Allocation */
+#define SFC_REG_ADRCBS 0x88 /* ADRCBS : Second Intf NOR Addr Offset */
+#define SFC_REG_ADRCMS 0x8C /* ADRCMS : Second Intf NOR Allocation */
+#define SFC_REG_OADRNB 0x90 /* OADRNB : Direct Access OPB Window Base Address */
+#define SFC_REG_OADRNS 0x94 /* OADRNS : Direct Access OPB Window Size */
+
+#define SFC_REG_CHIPIDCONF 0x9C /* CHIPIDCONF : config ChipId CMD */
+#define SFC_REG_CHIPIDCONF_OPCODE_SHFT 24
+#define SFC_REG_CHIPIDCONF_READ (1 << 23)
+#define SFC_REG_CHIPIDCONF_WRITE (1 << 22)
+#define SFC_REG_CHIPIDCONF_USE_ADDR (1 << 21)
+#define SFC_REG_CHIPIDCONF_DUMMY_SHFT 16
+#define SFC_REG_CHIPIDCONF_LEN_SHFT 0
+
+/*
+ * SFC Opcodes
+ */
+#define SFC_OP_READRAW 0x03 /* Read Raw */
+#define SFC_OP_WRITERAW 0x02 /* Write Raw */
+#define SFC_OP_ERASM 0x32 /* Erase Small */
+#define SFC_OP_ERALG 0x34 /* Erase Large */
+#define SFC_OP_ENWRITPROT 0x53 /* Enable Write Protect */
+#define SFC_OP_CHIPID 0x1F /* Get Chip ID */
+#define SFC_OP_STATUS 0x05 /* Get Status */
+#define SFC_OP_TURNOFF 0x5E /* Turn Off */
+#define SFC_OP_TURNON 0x50 /* Turn On */
+#define SFC_OP_ABORT 0x6F /* Super-Abort */
+#define SFC_OP_START4BA 0x37 /* Start 4BA */
+#define SFC_OP_END4BA 0x69 /* End 4BA */
+
+/* Command buffer size */
+#define SFC_CMDBUF_SIZE 256
+
+struct sfc_ctrl {
+ /* Erase sizes */
+ uint32_t small_er_size;
+ uint32_t large_er_size;
+
+ /* Current 4b mode */
+ bool mode_4b;
+
+ /* Callbacks */
+ struct spi_flash_ctrl ops;
+};
+
+/* Command register support */
+static inline int sfc_reg_read(uint8_t reg, uint32_t *val)
+{
+ uint32_t tmp;
+ int rc;
+
+ *val = 0xffffffff;
+ rc = lpc_fw_read32(&tmp, SFC_CMDREG_OFFSET + reg);
+ if (rc)
+ return rc;
+ *val = be32_to_cpu(tmp);
+ return 0;
+}
+
+static inline int sfc_reg_write(uint8_t reg, uint32_t val)
+{
+ return lpc_fw_write32(cpu_to_be32(val), SFC_CMDREG_OFFSET + reg);
+}
+
+static int sfc_buf_write(uint32_t len, const void *data)
+{
+ uint32_t tmp, off = 0;
+ int rc;
+
+ if (len > SFC_CMDBUF_SIZE)
+ return FLASH_ERR_PARM_ERROR;
+
+ while (len >= 4) {
+ tmp = *(const uint32_t *)data;
+ rc = lpc_fw_write32(tmp, SFC_CMDBUF_OFFSET + off);
+ if (rc)
+ return rc;
+ off += 4;
+ len -= 4;
+ data += 4;
+ }
+ if (!len)
+ return 0;
+
+	/* lpc_fw_write operates on BE values so that's what we lay out
+ * in memory with memcpy. The swap in the register on LE doesn't
+ * matter, the result in memory will be in the right order.
+ */
+ tmp = -1;
+ memcpy(&tmp, data, len);
+ return lpc_fw_write32(tmp, SFC_CMDBUF_OFFSET + off);
+}
+
+static int sfc_buf_read(uint32_t len, void *data)
+{
+ uint32_t tmp, off = 0;
+ int rc;
+
+ if (len > SFC_CMDBUF_SIZE)
+ return FLASH_ERR_PARM_ERROR;
+
+ while (len >= 4) {
+ rc = lpc_fw_read32(data, SFC_CMDBUF_OFFSET + off);
+ if (rc)
+ return rc;
+ off += 4;
+ len -= 4;
+ data += 4;
+ }
+ if (!len)
+ return 0;
+
+ rc = lpc_fw_read32(&tmp, SFC_CMDBUF_OFFSET + off);
+ if (rc)
+ return rc;
+ /* We know tmp contains a big endian value, so memcpy is
+ * our friend here
+ */
+ memcpy(data, &tmp, len);
+ return 0;
+}
+
+/* Polls until SFC indicates command is complete */
+static int sfc_poll_complete(void)
+{
+ uint32_t status, timeout;
+ struct timespec ts;
+
+ /*
+ * A full 256 bytes read/write command will take at least
+	 * 126us. Smaller commands are faster but we use fewer of
+ * them. So let's sleep in increments of 100us
+ */
+ ts.tv_sec = 0;
+ ts.tv_nsec = 100000;
+
+ /*
+ * Use a 1s timeout which should be sufficient for the
+ * commands we use
+ */
+ timeout = 10000;
+
+ do {
+ int rc;
+
+ rc = sfc_reg_read(SFC_REG_STATUS, &status);
+ if (rc)
+ return rc;
+ if (status & SFC_REG_STATUS_DONE)
+ break;
+ if (--timeout == 0)
+ return FLASH_ERR_CTRL_TIMEOUT;
+ nanosleep(&ts, NULL);
+ } while (true);
+
+ return 0;
+}
+
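+/* Write the command register to start an SFC op, then poll for completion */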
+static int sfc_exec_command(uint8_t opcode, uint32_t length)
+{
+ int rc = 0;
+ uint32_t cmd_reg = 0;
+
+ if (opcode > 0x7f || length > 0x1ff)
+ return FLASH_ERR_PARM_ERROR;
+
+ /* Write command register to start execution */
+ cmd_reg |= (opcode << SFC_REG_CMD_OPCODE_SHFT);
+ cmd_reg |= (length << SFC_REG_CMD_LENGTH_SHFT);
+ rc = sfc_reg_write(SFC_REG_CMD, cmd_reg);
+ if (rc)
+ return rc;
+
+ /* Wait for command to complete */
+ return sfc_poll_complete();
+}
+
+static int sfc_chip_id(struct spi_flash_ctrl *ctrl, uint8_t *id_buf,
+ uint32_t *id_size)
+{
+ uint32_t idconf;
+ int rc;
+
+ (void)ctrl;
+
+ if ((*id_size) < 3)
+ return FLASH_ERR_PARM_ERROR;
+
+ /*
+ * XXX This will not work in locked down mode but we assume that
+ * in this case, the chip ID command is already properly programmed
+ * and the SFC will ignore this. However I haven't verified...
+ */
+ idconf = ((uint64_t)CMD_RDID) << SFC_REG_CHIPIDCONF_OPCODE_SHFT;
+ idconf |= SFC_REG_CHIPIDCONF_READ;
+ idconf |= (3ul << SFC_REG_CHIPIDCONF_LEN_SHFT);
+ (void)sfc_reg_write(SFC_REG_CHIPIDCONF, idconf);
+
+ /* Perform command */
+ rc = sfc_exec_command(SFC_OP_CHIPID, 0);
+ if (rc)
+ return rc;
+
+ /* Read chip ID */
+ rc = sfc_buf_read(3, id_buf);
+ if (rc)
+ return rc;
+ *id_size = 3;
+
+ return 0;
+}
+
+static int sfc_read(struct spi_flash_ctrl *ctrl, uint32_t pos,
+ void *buf, uint32_t len)
+{
+ (void)ctrl;
+
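+	/* Read through the 256-byte command buffer, one chunk at a time */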
+ while(len) {
+ uint32_t chunk = len;
+ int rc;
+
+ if (chunk > SFC_CMDBUF_SIZE)
+ chunk = SFC_CMDBUF_SIZE;
+ rc = sfc_reg_write(SFC_REG_ADR, pos);
+ if (rc)
+ return rc;
+ rc = sfc_exec_command(SFC_OP_READRAW, chunk);
+ if (rc)
+ return rc;
+ rc = sfc_buf_read(chunk, buf);
+ if (rc)
+ return rc;
+ len -= chunk;
+ pos += chunk;
+ buf += chunk;
+ }
+ return 0;
+}
+
+static int sfc_write(struct spi_flash_ctrl *ctrl, uint32_t addr,
+ const void *buf, uint32_t size)
+{
+ uint32_t chunk;
+ int rc;
+
+ (void)ctrl;
+
+ while(size) {
+ /* We shall not cross a page boundary */
+ chunk = 0x100 - (addr & 0xff);
+ if (chunk > size)
+ chunk = size;
+
+ /* Write to SFC write buffer */
+ rc = sfc_buf_write(chunk, buf);
+ if (rc)
+ return rc;
+
+ /* Program address */
+ rc = sfc_reg_write(SFC_REG_ADR, addr);
+ if (rc)
+ return rc;
+
+ /* Send command */
+ rc = sfc_exec_command(SFC_OP_WRITERAW, chunk);
+ if (rc)
+ return rc;
+
+ addr += chunk;
+ buf += chunk;
+ size -= chunk;
+ }
+ return 0;
+}
+
+static int sfc_erase(struct spi_flash_ctrl *ctrl, uint32_t addr,
+ uint32_t size)
+{
+ struct sfc_ctrl *ct = container_of(ctrl, struct sfc_ctrl, ops);
+ uint32_t sm_mask = ct->small_er_size - 1;
+ uint32_t lg_mask = ct->large_er_size - 1;
+ uint32_t chunk;
+ uint8_t cmd;
+ int rc;
+
+ while(size) {
+ /* Choose erase size for this chunk */
+ if (((addr | size) & lg_mask) == 0) {
+ chunk = ct->large_er_size;
+ cmd = SFC_OP_ERALG;
+ } else if (((addr | size) & sm_mask) == 0) {
+ chunk = ct->small_er_size;
+ cmd = SFC_OP_ERASM;
+ } else
+ return FLASH_ERR_ERASE_BOUNDARY;
+
+ rc = sfc_reg_write(SFC_REG_ADR, addr);
+ if (rc)
+ return rc;
+ rc = sfc_exec_command(cmd, 0);
+ if (rc)
+ return rc;
+ addr += chunk;
+ size -= chunk;
+ }
+ return 0;
+}
+
+static int sfc_setup(struct spi_flash_ctrl *ctrl, struct flash_info *info,
+ uint32_t *tsize)
+{
+ struct sfc_ctrl *ct = container_of(ctrl, struct sfc_ctrl, ops);
+ uint32_t er_flags;
+
+ (void)tsize;
+
+ /* Keep non-erase related flags */
+ er_flags = ~FL_ERASE_ALL;
+
+ /* Add supported erase sizes */
+ if (ct->small_er_size == 0x1000 || ct->large_er_size == 0x1000)
+ er_flags |= FL_ERASE_4K;
+ if (ct->small_er_size == 0x8000 || ct->large_er_size == 0x8000)
+ er_flags |= FL_ERASE_32K;
+ if (ct->small_er_size == 0x10000 || ct->large_er_size == 0x10000)
+ er_flags |= FL_ERASE_64K;
+
+ /* Mask the flags out */
+ info->flags &= er_flags;
+
+ return 0;
+}
+
+static int sfc_set_4b(struct spi_flash_ctrl *ctrl, bool enable)
+{
+ struct sfc_ctrl *ct = container_of(ctrl, struct sfc_ctrl, ops);
+ int rc;
+
+ rc = sfc_exec_command(enable ? SFC_OP_START4BA : SFC_OP_END4BA, 0);
+ if (rc)
+ return rc;
+ ct->mode_4b = enable;
+ return 0;
+}
+
+static void sfc_validate_er_size(uint32_t *size)
+{
+ if (*size == 0)
+ return;
+
+ /* We only support 4k, 32k and 64k */
+ if (*size != 0x1000 && *size != 0x8000 && *size != 0x10000) {
+ FL_ERR("SFC: Erase size %d bytes unsupported\n", *size);
+ *size = 0;
+ }
+}
+
+static int sfc_init(struct sfc_ctrl *ct)
+{
+ int rc;
+ uint32_t status;
+
+ /*
+ * Assumptions: The controller has been fully initialized
+ * by an earlier FW layer setting the chip ID command, the
+ * erase sizes, and configuring the timings for reads and
+ * writes.
+ *
+	 * This driver is meant to be usable even if the
+	 * configuration is locked down.
+ *
+ * If that wasn't the case, we could configure some sane
+ * defaults here and tuned values in setup() after the
+ * chip has been identified.
+ */
+
+ /* Read erase sizes from flash */
+ rc = sfc_reg_read(SFC_REG_ERASMS, &ct->small_er_size);
+ if (rc)
+ return rc;
+ sfc_validate_er_size(&ct->small_er_size);
+ rc = sfc_reg_read(SFC_REG_ERASLGS, &ct->large_er_size);
+ if (rc)
+ return rc;
+ sfc_validate_er_size(&ct->large_er_size);
+
+ /* No erase sizes we can cope with ? Ouch... */
+ if ((ct->small_er_size == 0 && ct->large_er_size == 0) ||
+ (ct->large_er_size && (ct->small_er_size > ct->large_er_size))) {
+ FL_ERR("SFC: No supported erase sizes !\n");
+ return FLASH_ERR_CTRL_CONFIG_MISMATCH;
+ }
+
+	FL_INF("SFC: Supported erase sizes:");
+ if (ct->small_er_size)
+ FL_INF(" %dKB", ct->small_er_size >> 10);
+ if (ct->large_er_size)
+ FL_INF(" %dKB", ct->large_er_size >> 10);
+ FL_INF("\n");
+
+ /* Read current state of 4 byte addressing */
+ rc = sfc_reg_read(SFC_REG_STATUS, &status);
+ if (rc)
+ return rc;
+ ct->mode_4b = !!(status & SFC_REG_STATUS_FOURBYTEAD);
+
+ return 0;
+}
+
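+/*
+ * Controller open/close. A typical caller (a minimal sketch, not taken
+ * from an actual user of this driver) would look like:
+ *
+ *	struct spi_flash_ctrl *ctrl;
+ *	uint8_t buf[256];
+ *
+ *	if (sfc_open(&ctrl) == 0) {
+ *		ctrl->read(ctrl, 0, buf, sizeof(buf));
+ *		sfc_close(ctrl);
+ *	}
+ */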
+int sfc_open(struct spi_flash_ctrl **ctrl)
+{
+ struct sfc_ctrl *ct;
+ int rc;
+
+ *ctrl = NULL;
+ ct = malloc(sizeof(*ct));
+ if (!ct) {
+ FL_ERR("SFC: Failed to allocate\n");
+ return FLASH_ERR_MALLOC_FAILED;
+ }
+ memset(ct, 0, sizeof(*ct));
+ ct->ops.chip_id = sfc_chip_id;
+ ct->ops.setup = sfc_setup;
+ ct->ops.set_4b = sfc_set_4b;
+ ct->ops.read = sfc_read;
+ ct->ops.write = sfc_write;
+ ct->ops.erase = sfc_erase;
+
+ rc = sfc_init(ct);
+ if (rc)
+ goto fail;
+ *ctrl = &ct->ops;
+ return 0;
+ fail:
+ free(ct);
+ return rc;
+}
+
+void sfc_close(struct spi_flash_ctrl *ctrl)
+{
+ struct sfc_ctrl *ct = container_of(ctrl, struct sfc_ctrl, ops);
+
+ /* Free the whole lot */
+ free(ct);
+}
+
diff --git a/hw/slw.c b/hw/slw.c
new file mode 100644
index 0000000..3522458
--- /dev/null
+++ b/hw/slw.c
@@ -0,0 +1,875 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Handle the SLW (Sleep/Winkle) image & CPU power management
+ */
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <cpu.h>
+#include <chip.h>
+#include <mem_region.h>
+#include <chiptod.h>
+#include <interrupts.h>
+#include <timebase.h>
+#include <fsp-elog.h>
+
+#ifdef __HAVE_LIBPORE__
+#include <p8_pore_table_gen_api.H>
+#include <sbe_xip_image.h>
+#endif
+
+//#define DBG(fmt...) printf("SLW: " fmt)
+#define DBG(fmt...) do { } while(0)
+
+#define MAX_RESET_PATCH_SIZE 64
+static uint32_t slw_saved_reset[MAX_RESET_PATCH_SIZE];
+
+static bool slw_current_le = false;
+
+/* Assembly in head.S */
+extern void enter_rvwinkle(void);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SLW_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_SLW,
+ OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SLW_SET, OPAL_PLATFORM_ERR_EVT, OPAL_SLW,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SLW_GET, OPAL_PLATFORM_ERR_EVT, OPAL_SLW,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_SLW_REG, OPAL_PLATFORM_ERR_EVT, OPAL_SLW,
+ OPAL_PLATFORM_FIRMWARE, OPAL_INFO,
+ OPAL_NA, NULL);
+
+static void slw_do_rvwinkle(void *data)
+{
+ struct cpu_thread *cpu = this_cpu();
+ struct cpu_thread *master = data;
+ uint64_t lpcr = mfspr(SPR_LPCR);
+ struct proc_chip *chip;
+
+ /* Setup our ICP to receive IPIs */
+ icp_prep_for_rvwinkle();
+
+ /* Setup LPCR to wakeup on external interrupts only */
+ mtspr(SPR_LPCR, ((lpcr & ~SPR_LPCR_P8_PECE) | SPR_LPCR_P8_PECE2));
+
+	printf("SLW: CPU PIR 0x%04x going to rvwinkle...\n", cpu->pir);
+
+	/* Signal that we got it */
+ cpu->state = cpu_state_rvwinkle;
+
+ enter_rvwinkle();
+
+ /* Ok, it's ours again */
+ cpu->state = cpu_state_active;
+
+ printf("SLW: CPU PIR 0x%04x woken up !\n", cpu->pir);
+
+ /* Cleanup our ICP */
+ reset_cpu_icp();
+
+ /* Resync timebase */
+ chiptod_wakeup_resync();
+
+ /* Restore LPCR */
+ mtspr(SPR_LPCR, lpcr);
+
+ /* If we are passed a master pointer we are the designated
+ * waker, let's proceed. If not, return, we are finished.
+ */
+ if (!master)
+ return;
+
+ printf("SLW: CPU PIR 0x%04x waiting for master...\n", cpu->pir);
+
+ /* Allriiiight... now wait for master to go down */
+ while(master->state != cpu_state_rvwinkle)
+ sync();
+
+ /* XXX Wait one second ! (should check xscom state ? ) */
+ time_wait_ms(1000);
+
+ for_each_chip(chip) {
+ struct cpu_thread *c;
+ uint64_t tmp;
+ for_each_available_core_in_chip(c, chip->id) {
+ xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
+ EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+ DBG("SLW: core %x:%x history: 0x%016llx (mid2)\n",
+ chip->id, pir_to_core_id(c->pir), tmp);
+ }
+ }
+
+ printf("SLW: Waking master (PIR 0x%04x)...\n", master->pir);
+
+ /* Now poke all the secondary threads on the master's core */
+ for_each_cpu(cpu) {
+ if (!cpu_is_sibling(cpu, master) || (cpu == master))
+ continue;
+ icp_kick_cpu(cpu);
+
+ /* Wait for it to claim to be back (XXX ADD TIMEOUT) */
+ while(cpu->state != cpu_state_active)
+ sync();
+ }
+
+ /* Now poke the master and be gone */
+ icp_kick_cpu(master);
+}
+
+static void slw_patch_reset(void)
+{
+ extern uint32_t rvwinkle_patch_start;
+ extern uint32_t rvwinkle_patch_end;
+ uint32_t *src, *dst, *sav;
+
+ BUILD_ASSERT((&rvwinkle_patch_end - &rvwinkle_patch_start) <=
+ MAX_RESET_PATCH_SIZE);
+
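+	/*
+	 * Copy the rvwinkle wakeup stub over the 0x100 system reset
+	 * vector, saving the original instructions so that
+	 * slw_unpatch_reset() can restore them.
+	 */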
+ src = &rvwinkle_patch_start;
+ dst = (uint32_t *)0x100;
+ sav = slw_saved_reset;
+ while(src < &rvwinkle_patch_end) {
+ *(sav++) = *(dst);
+ *(dst++) = *(src++);
+ }
+ sync_icache();
+}
+
+static void slw_unpatch_reset(void)
+{
+ extern uint32_t rvwinkle_patch_start;
+ extern uint32_t rvwinkle_patch_end;
+ uint32_t *src, *dst, *sav;
+
+ src = &rvwinkle_patch_start;
+ dst = (uint32_t *)0x100;
+ sav = slw_saved_reset;
+ while(src < &rvwinkle_patch_end) {
+ *(dst++) = *(sav++);
+ src++;
+ }
+ sync_icache();
+}
+
+static bool slw_general_init(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp;
+ int rc;
+
+ /* PowerManagement GP0 clear PM_DISABLE */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP0), &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Failed to read PM_GP0\n");
+ return false;
+ }
+ tmp = tmp & ~0x8000000000000000ULL;
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP0), tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Failed to write PM_GP0\n");
+ return false;
+ }
+ DBG("SLW: PMGP0 set to 0x%016llx\n", tmp);
+
+ /* Read back for debug */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP0), &tmp);
+ DBG("SLW: PMGP0 read 0x%016llx\n", tmp);
+
+ /* Set CORE and ECO PFET Vret to select zero */
+ rc = xscom_write(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_CORE_PFET_VRET), 0);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Failed to write PM_CORE_PFET_VRET\n");
+ return false;
+ }
+ rc = xscom_write(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_CORE_ECO_VRET), 0);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Failed to write PM_CORE_ECO_VRET\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool slw_set_overrides(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp;
+ int rc;
+
+ /*
+ * Set ENABLE_IGNORE_RECOV_ERRORS in OHA_MODE_REG
+ *
+	 * XXX FIXME: This should only be done for "forced" winkle such as
+ * when doing repairs or LE transition, and we should restore the
+ * original value when done
+ */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX(core, PM_OHA_MODE_REG),
+ &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to read PM_OHA_MODE_REG\n");
+ return false;
+ }
+ tmp = tmp | 0x8000000000000000ULL;
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX(core, PM_OHA_MODE_REG),
+ tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_OHA_MODE_REG\n");
+ return false;
+ }
+ DBG("SLW: PM_OHA_MODE_REG set to 0x%016llx\n", tmp);
+
+ /* Read back for debug */
+ rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX(core, PM_OHA_MODE_REG),&tmp);
+ DBG("SLW: PM_OHA_MODE_REG read 0x%016llx\n", tmp);
+
+ /*
+ * Clear special wakeup bits that could hold power mgt
+ *
+ * XXX FIXME: See above
+ */
+ rc = xscom_write(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_SPECIAL_WAKEUP_FSP),
+ 0);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_SPECIAL_WAKEUP_FSP\n");
+ return false;
+ }
+ rc = xscom_write(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_SPECIAL_WAKEUP_OCC),
+ 0);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_SPECIAL_WAKEUP_OCC\n");
+ return false;
+ }
+ rc = xscom_write(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_SPECIAL_WAKEUP_PHYP),
+ 0);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_SPECIAL_WAKEUP_PHYP\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool slw_unset_overrides(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+
+ /* XXX FIXME: Save and restore the overrides */
+ printf("SLW: slw_unset_overrides %x:%x\n", chip->id, core);
+ return true;
+}
+
+static bool slw_set_deep_mode(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp;
+ int rc;
+
+ /* Init PM GP1 for fast mode or deep mode */
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1),
+ EX_PM_SETUP_GP1_DEEP_SLEEP);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_GP1\n");
+ return false;
+ }
+
+ /* Read back for debug */
+ xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), &tmp);
+ DBG("SLW: PMGP1 read 0x%016llx\n", tmp);
+ return true;
+}
+
+static bool slw_set_fast_mode(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp;
+ int rc;
+
+ /* Init PM GP1 for fast mode or deep mode */
+ rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1),
+ EX_PM_SETUP_GP1_FAST_SLEEP);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_SET),
+ "SLW: Failed to write PM_GP1\n");
+ return false;
+ }
+
+ /* Read back for debug */
+ xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_GP1), &tmp);
+ DBG("SLW: PMGP1 read 0x%016llx\n", tmp);
+ return true;
+}
+
+static bool slw_get_idle_state_history(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint32_t core = pir_to_core_id(c->pir);
+ uint64_t tmp;
+ int rc;
+
+ /* Cleanup history */
+ rc = xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_GET),
+ "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
+ return false;
+ }
+
+ DBG("SLW: core %x:%x history: 0x%016llx (old1)\n",
+ chip->id, core, tmp);
+
+ rc = xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_GET),
+ "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
+ return false;
+ }
+
+ DBG("SLW: core %x:%x history: 0x%016llx (old2)\n",
+ chip->id, core, tmp);
+
+ return true;
+}
+
+static bool slw_prepare_core(struct proc_chip *chip, struct cpu_thread *c)
+{
+ DBG("SLW: Prepare core %x:%x\n",
+ chip->id, pir_to_core_id(c->pir));
+
+ if(!slw_general_init(chip, c))
+ return false;
+ if(!slw_set_overrides(chip, c))
+ return false;
+ if(!slw_set_deep_mode(chip, c))
+ return false;
+ if(!slw_get_idle_state_history(chip, c))
+ return false;
+
+ return true;
+}
+
+static bool fastsleep_prepare_core(struct proc_chip *chip, struct cpu_thread *c)
+{
+ DBG("FASTSLEEP: Prepare core %x:%x\n",
+ chip->id, pir_to_core_id(c->pir));
+
+ if(!slw_general_init(chip, c))
+ return false;
+ if(!slw_set_overrides(chip, c))
+ return false;
+ if(!slw_set_fast_mode(chip, c))
+ return false;
+ if(!slw_get_idle_state_history(chip, c))
+ return false;
+
+ return true;
+}
+
+/* Define device-tree fields */
+#define MAX_NAME_LEN 16
+struct cpu_idle_states {
+ char name[MAX_NAME_LEN];
+ u32 latency_ns;
+ u32 flags;
+ u64 pmicr;
+ u64 pmicr_mask;
+};
+
+/* Flag definitions */
+
+#define IDLE_DEC_STOP 0x00000001 /* Decrementer would stop */
+#define IDLE_TB_STOP 0x00000002 /* Timebase would stop */
+#define IDLE_LOSE_USER_CONTEXT 0x00000100 /* Restore GPRs like nap */
+#define IDLE_LOSE_HYP_CONTEXT 0x00000200 /* Restore hypervisor resource
+ from PACA pointer */
+#define IDLE_LOSE_FULL_CONTEXT 0x00000400 /* Restore hypervisor resource
+ by searching PACA */
+#define IDLE_USE_INST_NAP 0x00010000 /* Use nap instruction */
+#define IDLE_USE_INST_SLEEP 0x00020000 /* Use sleep instruction */
+#define IDLE_USE_INST_WINKLE 0x00040000 /* Use winkle instruction */
+#define IDLE_USE_PMICR 0x00800000 /* Use the PMICR SPR */
+
+#define IDLE_FASTSLEEP_PMICR 0x0000002000000000
+#define IDLE_DEEPSLEEP_PMICR 0x0000003000000000
+#define IDLE_SLEEP_PMICR_MASK 0x0000003000000000
+
+#define IDLE_FASTWINKLE_PMICR 0x0000000000200000
+#define IDLE_DEEPWINKLE_PMICR 0x0000000000300000
+#define IDLE_WINKLE_PMICR_MASK 0x0000000000300000
+
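+/*
+ * The 0*FLAG / 1*FLAG products below keep every flag visible in each
+ * state entry while making it obvious which ones are actually set.
+ */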
+static struct cpu_idle_states power7_cpu_idle_states[] = {
+ { /* nap */
+ .name = "nap",
+ .latency_ns = 1000,
+ .flags = 0*IDLE_DEC_STOP \
+ | 0*IDLE_TB_STOP \
+ | 1*IDLE_LOSE_USER_CONTEXT \
+ | 0*IDLE_LOSE_HYP_CONTEXT \
+ | 0*IDLE_LOSE_FULL_CONTEXT \
+ | 1*IDLE_USE_INST_NAP \
+ | 0*IDLE_USE_INST_SLEEP \
+ | 0*IDLE_USE_INST_WINKLE \
+ | 0*IDLE_USE_PMICR,
+ .pmicr = 0,
+ .pmicr_mask = 0 },
+};
+
+static struct cpu_idle_states power8_cpu_idle_states[] = {
+ { /* nap */
+ .name = "nap",
+ .latency_ns = 1000,
+ .flags = 0*IDLE_DEC_STOP \
+ | 0*IDLE_TB_STOP \
+ | 1*IDLE_LOSE_USER_CONTEXT \
+ | 0*IDLE_LOSE_HYP_CONTEXT \
+ | 0*IDLE_LOSE_FULL_CONTEXT \
+ | 1*IDLE_USE_INST_NAP \
+ | 0*IDLE_USE_INST_SLEEP \
+ | 0*IDLE_USE_INST_WINKLE \
+ | 0*IDLE_USE_PMICR,
+ .pmicr = 0,
+ .pmicr_mask = 0 },
+ { /* fast sleep */
+ .name = "fastsleep",
+ .latency_ns = 100000,
+ .flags = 1*IDLE_DEC_STOP \
+ | 1*IDLE_TB_STOP \
+ | 1*IDLE_LOSE_USER_CONTEXT \
+ | 0*IDLE_LOSE_HYP_CONTEXT \
+ | 0*IDLE_LOSE_FULL_CONTEXT \
+ | 0*IDLE_USE_INST_NAP \
+ | 1*IDLE_USE_INST_SLEEP \
+ | 0*IDLE_USE_INST_WINKLE \
+ | 0*IDLE_USE_PMICR, /* Not enabled until deep
+ states are available */
+ .pmicr = IDLE_FASTSLEEP_PMICR,
+ .pmicr_mask = IDLE_SLEEP_PMICR_MASK },
+};
+
+/* Add device tree properties to describe idle states */
+void add_cpu_idle_state_properties(void)
+{
+ struct dt_node *power_mgt;
+ struct cpu_idle_states *states;
+ struct proc_chip *chip;
+ int nr_states;
+
+ printf("CPU idle state device tree init\n");
+
+ /* Create /ibm,opal/power-mgt */
+ power_mgt = dt_new(opal_node, "power-mgt");
+ if (!power_mgt) {
+ printf("creating dt node /ibm,opal/power-mgt failed\n");
+ return;
+ }
+
+ /*
+	 * Choose the right state table for the chip
+	 *
+	 * XXX We use the first chip's version, we should probably look
+	 * for the smallest of all chips instead..
+ */
+ chip = next_chip(NULL);
+ assert(chip);
+ if (chip->type == PROC_CHIP_P8_MURANO ||
+ chip->type == PROC_CHIP_P8_VENICE) {
+ const struct dt_property *p;
+ bool can_sleep = true;
+
+ p = dt_find_property(dt_root, "ibm,enabled-idle-states");
+
+ states = power8_cpu_idle_states;
+ nr_states = ARRAY_SIZE(power8_cpu_idle_states);
+
+		/* Check if hostboot says we can sleep */
+ if (p && !dt_prop_find_string(p, "fastsleep"))
+ can_sleep = false;
+
+ /* Clip to NAP only on Murano DD1.x */
+ if (chip->type == PROC_CHIP_P8_MURANO &&
+ chip->ec_level < 0x20)
+ can_sleep = false;
+
+ if (!can_sleep)
+ nr_states = 1;
+ } else {
+ states = power7_cpu_idle_states;
+ nr_states = ARRAY_SIZE(power7_cpu_idle_states);
+ }
+
+ /*
+	 * XXX Creating variable-size properties is awkward. For now we hard-wire
+	 * the 1 and 2 state cases. Long term we want to implement functions to
+ * "append" strings and cells to properties so we can just have a loop
+ * of nr_states here
+ */
+ switch (nr_states) {
+ case 1:
+ dt_add_property_strings(power_mgt, "ibm,cpu-idle-state-names",
+ states[0].name);
+ dt_add_property_cells(power_mgt, "ibm,cpu-idle-state-latencies-ns",
+ states[0].latency_ns);
+ dt_add_property_cells(power_mgt, "ibm,cpu-idle-state-flags",
+ states[0].flags);
+ dt_add_property_u64s(power_mgt, "ibm,cpu-idle-state-pmicr",
+ states[0].pmicr);
+ dt_add_property_u64s(power_mgt, "ibm,cpu-idle-state-pmicr-mask",
+ states[0].pmicr_mask);
+ break;
+ case 2:
+ dt_add_property_strings(power_mgt, "ibm,cpu-idle-state-names",
+ states[0].name,
+ states[1].name);
+ dt_add_property_cells(power_mgt, "ibm,cpu-idle-state-latencies-ns",
+ states[0].latency_ns,
+ states[1].latency_ns);
+ dt_add_property_cells(power_mgt, "ibm,cpu-idle-state-flags",
+ states[0].flags,
+ states[1].flags);
+ dt_add_property_u64s(power_mgt, "ibm,cpu-idle-state-pmicr",
+ states[0].pmicr,
+ states[1].pmicr);
+ dt_add_property_u64s(power_mgt, "ibm,cpu-idle-state-pmicr-mask",
+ states[0].pmicr_mask,
+ states[1].pmicr_mask);
+ break;
+ default:
+ prerror("SLW: Unsupported number of states\n");
+ }
+}
+
+static bool slw_prepare_chip(struct proc_chip *chip)
+{
+ struct cpu_thread *c;
+
+ for_each_available_core_in_chip(c, chip->id) {
+ if (!slw_prepare_core(chip, c))
+ return false;
+ }
+ return true;
+}
+
+static void slw_cleanup_core(struct proc_chip *chip, struct cpu_thread *c)
+{
+ uint64_t tmp;
+ int rc;
+
+ /* Display history to check transition */
+ rc = xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
+ EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_GET),
+ "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
+ /* XXX error handling ? return false; */
+ }
+
+ printf("SLW: core %x:%x history: 0x%016llx (new1)\n",
+ chip->id, pir_to_core_id(c->pir), tmp);
+
+ rc = xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
+ EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_GET),
+ "SLW: Failed to read PM_IDLE_STATE_HISTORY\n");
+ /* XXX error handling ? return false; */
+ }
+
+ printf("SLW: core %x:%x history: 0x%016llx (new2)\n",
+ chip->id, pir_to_core_id(c->pir), tmp);
+
+ /*
+ * XXX FIXME: Error out if the transition didn't reach rvwinkle ?
+ */
+
+ /*
+ * XXX FIXME: We should restore a bunch of the EX bits we
+ * overwrite to sane values here
+ */
+ slw_unset_overrides(chip, c);
+}
+
+static void slw_cleanup_chip(struct proc_chip *chip)
+{
+ struct cpu_thread *c;
+
+ for_each_available_core_in_chip(c, chip->id)
+ slw_cleanup_core(chip, c);
+}
+
+#ifdef __HAVE_LIBPORE__
+static void slw_patch_scans(struct proc_chip *chip, bool le_mode)
+{
+ int64_t rc;
+ uint64_t old_val, new_val;
+
+ rc = sbe_xip_get_scalar((void *)chip->slw_base,
+ "skip_ex_override_ring_scans", &old_val);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_REG),
+ "SLW: Failed to read scan override on chip %d\n",
+ chip->id);
+ return;
+ }
+
+ new_val = le_mode ? 0 : 1;
+
+ DBG("SLW: Chip %d, LE value was: %lld, setting to %lld\n",
+ chip->id, old_val, new_val);
+
+ rc = sbe_xip_set_scalar((void *)chip->slw_base,
+ "skip_ex_override_ring_scans", new_val);
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_REG),
+ "SLW: Failed to set LE mode on chip %d\n", chip->id);
+ return;
+ }
+}
+#else
+static inline void slw_patch_scans(struct proc_chip *chip __unused,
+ bool le_mode __unused ) { }
+#endif /* __HAVE_LIBPORE__ */
+
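+/*
+ * Reinit all CPUs through rvwinkle, typically to flip the HILE bit:
+ * patch the reset vector, winkle every other CPU, then winkle
+ * ourselves while a designated waker brings us back, and finally
+ * restore the reset vector.
+ */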
+int64_t slw_reinit(uint64_t flags)
+{
+ struct proc_chip *chip;
+ struct cpu_thread *cpu;
+ bool has_waker = false;
+ bool target_le = slw_current_le;
+
+#ifndef __HAVE_LIBPORE__
+ return OPAL_UNSUPPORTED;
+#endif
+
+ if (flags & OPAL_REINIT_CPUS_HILE_BE)
+ target_le = false;
+ if (flags & OPAL_REINIT_CPUS_HILE_LE)
+ target_le = true;
+
+ DBG("SLW Reinit from CPU PIR 0x%04x, HILE set to %s endian...\n",
+ this_cpu()->pir, target_le ? "little" : "big");
+
+ /* Prepare chips/cores for rvwinkle */
+ for_each_chip(chip) {
+ if (!chip->slw_base) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Not found on chip %d\n", chip->id);
+ return OPAL_HARDWARE;
+ }
+ if (!slw_prepare_chip(chip)) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Error preparing chip %d\n", chip->id);
+ return OPAL_HARDWARE;
+ }
+ slw_patch_scans(chip, target_le);
+ }
+ slw_current_le = target_le;
+
+ /* XXX Save HIDs ? Or do that in head.S ... */
+
+ slw_patch_reset();
+
+ /* rvwinkle everybody and pick one to wake me once I rvwinkle myself */
+ for_each_available_cpu(cpu) {
+ struct cpu_thread *master = NULL;
+
+ if (cpu == this_cpu())
+ continue;
+
+ /* Pick up a waker for myself: it must not be a sibling of
+ * the current CPU and must be a thread 0 (so it gets to
+		 * sync its timebase before doing time_wait_ms())
+ */
+ if (!has_waker && !cpu_is_sibling(cpu, this_cpu()) &&
+ cpu_is_thread0(cpu)) {
+ has_waker = true;
+ master = this_cpu();
+ }
+ __cpu_queue_job(cpu, slw_do_rvwinkle, master, true);
+
+ /* Wait for it to claim to be down */
+ while(cpu->state != cpu_state_rvwinkle)
+ sync();
+ }
+
+ /* XXX Wait one second ! (should check xscom state ? ) */
+ DBG("SLW: [TB=0x%016lx] Waiting one second...\n", mftb());
+ time_wait_ms(1000);
+ DBG("SLW: [TB=0x%016lx] Done.\n", mftb());
+
+ for_each_chip(chip) {
+ struct cpu_thread *c;
+ uint64_t tmp;
+ for_each_available_core_in_chip(c, chip->id) {
+ xscom_read(chip->id,
+ XSCOM_ADDR_P8_EX_SLAVE(pir_to_core_id(c->pir),
+ EX_PM_IDLE_STATE_HISTORY_PHYP),
+ &tmp);
+ printf("SLW: core %x:%x history: 0x%016llx (mid)\n",
+ chip->id, pir_to_core_id(c->pir), tmp);
+ }
+ }
+
+ /* Wake everybody except on my core */
+ for_each_cpu(cpu) {
+ if (cpu->state != cpu_state_rvwinkle ||
+ cpu_is_sibling(cpu, this_cpu()))
+ continue;
+ icp_kick_cpu(cpu);
+
+ /* Wait for it to claim to be back (XXX ADD TIMEOUT) */
+ while(cpu->state != cpu_state_active)
+ sync();
+ }
+
+ /* Did we find a waker ? If we didn't, that means we had no
+	 * other core in the system, so we can't do it
+ */
+ if (!has_waker) {
+ DBG("SLW: No candidate waker, giving up !\n");
+ return OPAL_HARDWARE;
+ }
+
+ /* Our siblings are rvwinkling, and our waker is waiting for us
+ * so let's just go down now
+ */
+ slw_do_rvwinkle(NULL);
+
+ slw_unpatch_reset();
+
+ for_each_chip(chip)
+ slw_cleanup_chip(chip);
+
+ DBG("SLW Reinit complete !\n");
+
+ return OPAL_SUCCESS;
+}
+
+#ifdef __HAVE_LIBPORE__
+static void slw_patch_regs(struct proc_chip *chip)
+{
+ struct cpu_thread *c;
+ void *image = (void *)chip->slw_base;
+ int rc;
+
+ for_each_available_cpu(c) {
+ if (c->chip_id != chip->id)
+ continue;
+
+ /* Clear HRMOR */
+ rc = p8_pore_gen_cpureg_fixed(image, P8_SLW_MODEBUILD_SRAM,
+ P8_SPR_HRMOR, 0,
+ cpu_get_core_index(c),
+ cpu_get_thread_index(c));
+ if (rc) {
+ log_simple_error(&e_info(OPAL_RC_SLW_REG),
+ "SLW: Failed to set HRMOR for CPU %x\n",
+ c->pir);
+ }
+
+ /* XXX Add HIDs etc... */
+ }
+}
+#endif /* __HAVE_LIBPORE__ */
+
+static void slw_init_chip(struct proc_chip *chip)
+{
+ int rc __unused;
+ struct cpu_thread *c;
+
+ prerror("SLW: Init chip 0x%x\n", chip->id);
+
+ if (!chip->slw_base) {
+ prerror("SLW: No image found !\n");
+ return;
+ }
+
+#ifdef __HAVE_LIBPORE__
+ /* Check actual image size */
+ rc = sbe_xip_get_scalar((void *)chip->slw_base, "image_size",
+ &chip->slw_image_size);
+ if (rc != 0) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Error %d reading SLW image size\n", rc);
+ /* XXX Panic ? */
+ chip->slw_base = 0;
+ chip->slw_bar_size = 0;
+ chip->slw_image_size = 0;
+ return;
+ }
+ printf("SLW: Image size from image: 0x%llx\n", chip->slw_image_size);
+
+ if (chip->slw_image_size > chip->slw_bar_size) {
+ log_simple_error(&e_info(OPAL_RC_SLW_INIT),
+ "SLW: Built-in image size larger than BAR size !\n");
+ /* XXX Panic ? */
+ }
+
+ /* Patch SLW image */
+ slw_patch_regs(chip);
+#endif /* __HAVE_LIBPORE__ */
+
+ /* At power ON setup inits for fast-sleep */
+ for_each_available_core_in_chip(c, chip->id) {
+ fastsleep_prepare_core(chip, c);
+ }
+}
+
+void slw_init(void)
+{
+ struct proc_chip *chip;
+
+ if (proc_gen != proc_gen_p8)
+ return;
+
+ for_each_chip(chip)
+ slw_init_chip(chip);
+}
+
diff --git a/hw/xscom.c b/hw/xscom.c
new file mode 100644
index 0000000..c4c3be2
--- /dev/null
+++ b/hw/xscom.c
@@ -0,0 +1,518 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+#include <xscom.h>
+#include <io.h>
+#include <processor.h>
+#include <device.h>
+#include <chip.h>
+#include <centaur.h>
+#include <fsp-elog.h>
+
+/* Mask of bits to clear in HMER before an access */
+#define HMER_CLR_MASK (~(SPR_HMER_XSCOM_FAIL | \
+ SPR_HMER_XSCOM_DONE | \
+ SPR_HMER_XSCOM_STATUS_MASK))
+
+#define XSCOM_ADDR_IND_FLAG PPC_BIT(0)
+#define XSCOM_ADDR_IND_ADDR_MASK PPC_BITMASK(12,31)
+#define XSCOM_ADDR_IND_ADDR_LSH PPC_BITLSHIFT(31)
+#define XSCOM_ADDR_IND_DATA_MSK PPC_BITMASK(48,63)
+
+#define XSCOM_DATA_IND_READ PPC_BIT(0)
+#define XSCOM_DATA_IND_COMPLETE PPC_BIT(32)
+#define XSCOM_DATA_IND_ERR_MASK PPC_BITMASK(33,35)
+#define XSCOM_DATA_IND_ERR_LSH PPC_BITLSHIFT(35)
+#define XSCOM_DATA_IND_DATA_MSK PPC_BITMASK(48,63)
+
+/* HB folks say: try 10 times for now */
+#define XSCOM_IND_MAX_RETRIES 10
+
+DEFINE_LOG_ENTRY(OPAL_RC_XSCOM_RW, OPAL_PLATFORM_ERR_EVT, OPAL_XSCOM,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_XSCOM_INDIRECT_RW, OPAL_PLATFORM_ERR_EVT, OPAL_XSCOM,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+DEFINE_LOG_ENTRY(OPAL_RC_XSCOM_RESET, OPAL_PLATFORM_ERR_EVT, OPAL_XSCOM,
+ OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
+ OPAL_NA, NULL);
+
+/*
+ * Locking notes:
+ *
+ * We used to have a per-target lock. However, due to erratum HW822317
+ * we can have issues on the issuer side if multiple threads try to
+ * send XSCOMs simultaneously (HMER responses get mixed up), so just
+ * use a global lock instead.
+ */
+static struct lock xscom_lock = LOCK_UNLOCKED;
+
+static inline void *xscom_addr(uint32_t gcid, uint32_t pcb_addr)
+{
+ struct proc_chip *chip = get_chip(gcid);
+ uint64_t addr;
+
+ assert(chip);
+ addr = chip->xscom_base;
+ addr |= ((uint64_t)pcb_addr << 4) & ~0xfful;
+ addr |= (pcb_addr << 3) & 0x78;
+
+ return (void *)addr;
+}
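
The address arithmetic above interleaves the PCB address into the XSCOM MMIO space; a worked example may help (the base address is illustrative):

/*
 * With chip->xscom_base = 0x3fc0000000000 and pcb_addr = 0xf000f:
 *
 *   ((uint64_t)0xf000f << 4) & ~0xfful = 0xf00000
 *   (0xf000f << 3) & 0x78              = 0x000078
 *
 * so the resulting MMIO address is 0x3fc0000f00078: bits 4 and up of
 * the PCB address land at MMIO bit 8 and up, the low nibble lands at
 * MMIO bits 3..6, and MMIO bits 0..2 and 7 stay zero.
 */
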
+
+static uint64_t xscom_wait_done(void)
+{
+ uint64_t hmer;
+
+ do
+ hmer = mfspr(SPR_HMER);
+ while(!(hmer & SPR_HMER_XSCOM_DONE));
+
+ /*
+ * HW822317: We need to read a second time as the actual
+ * status can be delayed by 1 cycle after DONE
+ */
+ return mfspr(SPR_HMER);
+}
+
+static void xscom_reset(uint32_t gcid)
+{
+ u64 hmer;
+
+ /* Clear errors in HMER */
+ mtspr(SPR_HMER, HMER_CLR_MASK);
+
+ /* First we need to write 0 to a register on our chip */
+ out_be64(xscom_addr(this_cpu()->chip_id, 0x202000f), 0);
+ hmer = xscom_wait_done();
+ if (hmer & SPR_HMER_XSCOM_FAIL)
+ goto fail;
+
+ /* Then we need to clear those two other registers on the target */
+ out_be64(xscom_addr(gcid, 0x2020007), 0);
+ hmer = xscom_wait_done();
+ if (hmer & SPR_HMER_XSCOM_FAIL)
+ goto fail;
+ out_be64(xscom_addr(gcid, 0x2020009), 0);
+ hmer = xscom_wait_done();
+ if (hmer & SPR_HMER_XSCOM_FAIL)
+ goto fail;
+ return;
+ fail:
+ /* Fatal error resetting XSCOM */
+ log_simple_error(&e_info(OPAL_RC_XSCOM_RESET),
+ "XSCOM: Fatal error resetting engine after failed access !\n");
+
+ /* XXX Generate error log ? attn ? panic ?
+ * If we decide to panic, change the above severity to PANIC
+ */
+}
+
+static bool xscom_handle_error(uint64_t hmer, uint32_t gcid, uint32_t pcb_addr,
+ bool is_write)
+{
+ unsigned int stat = GETFIELD(SPR_HMER_XSCOM_STATUS, hmer);
+
+ /* XXX Figure out error codes from doc and error
+ * recovery procedures
+ */
+ switch(stat) {
+ /* XSCOM blocked, just retry */
+ case 1:
+ return true;
+ }
+
+ /* XXX: Create error log entry ? */
+ log_simple_error(&e_info(OPAL_RC_XSCOM_RW),
+ "XSCOM: %s error gcid=0x%x pcb_addr=0x%x stat=0x%x\n",
+ is_write ? "write" : "read", gcid, pcb_addr, stat);
+
+ /* We need to reset the XSCOM or we'll hang on the next access */
+ xscom_reset(gcid);
+
+ /* Non recovered ... just fail */
+ return false;
+}
+
+static void xscom_handle_ind_error(uint64_t data, uint32_t gcid,
+ uint64_t pcb_addr, bool is_write)
+{
+ unsigned int stat = GETFIELD(XSCOM_DATA_IND_ERR, data);
+ bool timeout = !(data & XSCOM_DATA_IND_COMPLETE);
+
+ /* XXX: Create error log entry ? */
+ if (timeout)
+ log_simple_error(&e_info(OPAL_RC_XSCOM_INDIRECT_RW),
+ "XSCOM: %s indirect timeout, gcid=0x%x pcb_addr=0x%llx"
+ " stat=0x%x\n",
+ is_write ? "write" : "read", gcid, pcb_addr, stat);
+ else
+ log_simple_error(&e_info(OPAL_RC_XSCOM_INDIRECT_RW),
+ "XSCOM: %s indirect error, gcid=0x%x pcb_addr=0x%llx"
+ " stat=0x%x\n",
+ is_write ? "write" : "read", gcid, pcb_addr, stat);
+}
+
+static bool xscom_gcid_ok(uint32_t gcid)
+{
+ return get_chip(gcid) != NULL;
+}
+
+/*
+ * Low level XSCOM access functions, perform a single direct xscom
+ * access via MMIO
+ */
+static int __xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val)
+{
+ uint64_t hmer;
+
+ if (!xscom_gcid_ok(gcid)) {
+ prerror("%s: invalid XSCOM gcid 0x%x\n", __func__, gcid);
+ return OPAL_PARAMETER;
+ }
+
+ for (;;) {
+ /* Clear status bits in HMER (HMER is special:
+ * writing to it *ands* the bits)
+ */
+ mtspr(SPR_HMER, HMER_CLR_MASK);
+
+ /* Read value from SCOM */
+ *val = in_be64(xscom_addr(gcid, pcb_addr));
+
+ /* Wait for done bit */
+ hmer = xscom_wait_done();
+
+ /* Check for error */
+ if (!(hmer & SPR_HMER_XSCOM_FAIL))
+ break;
+
+ /* Handle the error and retry if appropriate */
+ if (!xscom_handle_error(hmer, gcid, pcb_addr, false))
+ return OPAL_HARDWARE;
+ }
+ return 0;
+}
+
+static int __xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val)
+{
+ uint64_t hmer;
+
+ if (!xscom_gcid_ok(gcid)) {
+ prerror("%s: invalid XSCOM gcid 0x%x\n", __func__, gcid);
+ return OPAL_PARAMETER;
+ }
+
+ for (;;) {
+ /* Clear status bits in HMER (HMER is special:
+ * writing to it *ands* the bits)
+ */
+ mtspr(SPR_HMER, HMER_CLR_MASK);
+
+ /* Write value to SCOM */
+ out_be64(xscom_addr(gcid, pcb_addr), val);
+
+ /* Wait for done bit */
+ hmer = xscom_wait_done();
+
+ /* Check for error */
+ if (!(hmer & SPR_HMER_XSCOM_FAIL))
+ break;
+
+ /* Handle the error and retry if appropriate */
+ if (!xscom_handle_error(hmer, gcid, pcb_addr, true))
+ return OPAL_HARDWARE;
+ }
+ return 0;
+}
+
+/*
+ * Indirect XSCOM access functions
+ */
+static int xscom_indirect_read(uint32_t gcid, uint64_t pcb_addr, uint64_t *val)
+{
+ uint32_t addr;
+ uint64_t data;
+ int rc, retries;
+
+ if (proc_gen != proc_gen_p8) {
+ *val = (uint64_t)-1;
+ return OPAL_UNSUPPORTED;
+ }
+
+ /* Write indirect address */
+ addr = pcb_addr & 0x7fffffff;
+ data = XSCOM_DATA_IND_READ |
+ (pcb_addr & XSCOM_ADDR_IND_ADDR_MASK);
+ rc = __xscom_write(gcid, addr, data);
+ if (rc)
+ goto bail;
+
+ /* Wait for completion */
+ for (retries = 0; retries < XSCOM_IND_MAX_RETRIES; retries++) {
+ rc = __xscom_read(gcid, addr, &data);
+ if (rc)
+ goto bail;
+ if ((data & XSCOM_DATA_IND_COMPLETE) &&
+ ((data & XSCOM_DATA_IND_ERR_MASK) == 0)) {
+ *val = data & XSCOM_DATA_IND_DATA_MSK;
+ break;
+ }
+ if ((data & XSCOM_DATA_IND_COMPLETE) ||
+ (retries == XSCOM_IND_MAX_RETRIES - 1)) {
+ xscom_handle_ind_error(data, gcid, pcb_addr,
+ false);
+ rc = OPAL_HARDWARE;
+ goto bail;
+ }
+ }
+ bail:
+ if (rc)
+ *val = (uint64_t)-1;
+ return rc;
+}
+
+static int xscom_indirect_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val)
+{
+ uint32_t addr;
+ uint64_t data;
+ int rc, retries;
+
+ if (proc_gen != proc_gen_p8)
+ return OPAL_UNSUPPORTED;
+
+ /* Write indirect address & data */
+ addr = pcb_addr & 0x7fffffff;
+ data = pcb_addr & XSCOM_ADDR_IND_ADDR_MASK;
+ data |= val & XSCOM_ADDR_IND_DATA_MSK;
+
+ rc = __xscom_write(gcid, addr, data);
+ if (rc)
+ goto bail;
+
+ /* Wait for completion */
+ for (retries = 0; retries < XSCOM_IND_MAX_RETRIES; retries++) {
+ rc = __xscom_read(gcid, addr, &data);
+ if (rc)
+ goto bail;
+ if ((data & XSCOM_DATA_IND_COMPLETE) &&
+ ((data & XSCOM_DATA_IND_ERR_MASK) == 0))
+ break;
+ if ((data & XSCOM_DATA_IND_COMPLETE) ||
+ (retries == XSCOM_IND_MAX_RETRIES - 1)) {
+ xscom_handle_ind_error(data, gcid, pcb_addr,
+ true);
+ rc = OPAL_HARDWARE;
+ goto bail;
+ }
+ }
+ bail:
+ return rc;
+}
+
+static uint32_t xscom_decode_chiplet(uint32_t partid, uint64_t *pcb_addr)
+{
+ uint32_t gcid = (partid & 0x0fffffff) >> 4;
+ uint32_t core = partid & 0xf;
+
+ *pcb_addr |= P8_EX_PCB_SLAVE_BASE;
+ *pcb_addr |= core << 24;
+
+ return gcid;
+}
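
To make the routing concrete, here is a worked decode with an illustrative part ID (the value of P8_EX_PCB_SLAVE_BASE is defined elsewhere):

/*
 * partid = 0x40000014 has 4 in its top nibble, so xscom_read() and
 * xscom_write() route it through xscom_decode_chiplet():
 *
 *   gcid = (0x40000014 & 0x0fffffff) >> 4 = 0x1   (chip 1)
 *   core =  0x40000014 & 0xf              = 0x4   (EX chiplet 4)
 *
 * and pcb_addr is rebased into the EX PCB slave space with the core
 * number placed in bits 24..27.
 */
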
+
+/*
+ * External API
+ */
+int xscom_read(uint32_t partid, uint64_t pcb_addr, uint64_t *val)
+{
+ bool need_unlock;
+ uint32_t gcid;
+ int rc;
+
+ /* Handle part ID decoding */
+ switch(partid >> 28) {
+ case 0: /* Normal processor chip */
+ gcid = partid;
+ break;
+ case 8: /* Centaur */
+ return centaur_xscom_read(partid, pcb_addr, val);
+ case 4: /* EX chiplet */
+ gcid = xscom_decode_chiplet(partid, &pcb_addr);
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ /*
+ * HW822317 requires locking. We use a recursive lock as error
+ * conditions might cause printf's which might then try to take
+ * the lock again
+ */
+ need_unlock = lock_recursive(&xscom_lock);
+
+ /* Direct vs indirect access */
+ if (pcb_addr & XSCOM_ADDR_IND_FLAG)
+ rc = xscom_indirect_read(gcid, pcb_addr, val);
+ else
+ rc = __xscom_read(gcid, pcb_addr & 0x7fffffff, val);
+
+ /* Unlock it */
+ if (need_unlock)
+ unlock(&xscom_lock);
+ return rc;
+}
+
+opal_call(OPAL_XSCOM_READ, xscom_read, 3);
+
+int xscom_write(uint32_t partid, uint64_t pcb_addr, uint64_t val)
+{
+ bool need_unlock;
+ uint32_t gcid;
+ int rc;
+
+ /* Handle part ID decoding */
+ switch(partid >> 28) {
+ case 0: /* Normal processor chip */
+ gcid = partid;
+ break;
+ case 8: /* Centaur */
+ return centaur_xscom_write(partid, pcb_addr, val);
+ case 4: /* EX chiplet */
+ gcid = xscom_decode_chiplet(partid, &pcb_addr);
+ break;
+ default:
+ return OPAL_PARAMETER;
+ }
+
+ /*
+ * HW822317 requires locking. We use a recursive lock as error
+ * conditions might cause printf's which might then try to take
+ * the lock again
+ */
+ need_unlock = lock_recursive(&xscom_lock);
+
+ /* Direct vs indirect access */
+ if (pcb_addr & XSCOM_ADDR_IND_FLAG)
+ rc = xscom_indirect_write(gcid, pcb_addr, val);
+ else
+ rc = __xscom_write(gcid, pcb_addr & 0x7fffffff, val);
+
+ /* Unlock it */
+ if (need_unlock)
+ unlock(&xscom_lock);
+ return rc;
+}
+opal_call(OPAL_XSCOM_WRITE, xscom_write, 3);
+
+int xscom_readme(uint64_t pcb_addr, uint64_t *val)
+{
+ return xscom_read(this_cpu()->chip_id, pcb_addr, val);
+}
+
+int xscom_writeme(uint64_t pcb_addr, uint64_t val)
+{
+ return xscom_write(this_cpu()->chip_id, pcb_addr, val);
+}
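
A minimal usage sketch of the external API follows; the 0xf000f PCB address mirrors the one read by xscom_init_chip_info() below, and the chip ID is illustrative:

static void xscom_usage_example(void)
{
	uint64_t val;

	/* Read a register on the calling CPU's own chip */
	if (xscom_readme(0xf000f, &val) == 0)
		printf("XSCOM: local 0xf000f = 0x%llx\n", val);

	/* Read the same register on an explicit chip by GCID */
	if (xscom_read(0, 0xf000f, &val) == 0)
		printf("XSCOM: chip 0 0xf000f = 0x%llx\n", val);
}
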
+
+static void xscom_init_chip_info(struct proc_chip *chip)
+{
+ uint64_t val;
+ int64_t rc;
+
+ rc = xscom_read(chip->id, 0xf000f, &val);
+ if (rc) {
+ prerror("XSCOM: Error %lld reading 0xf000f register\n", rc);
+ /* We leave chip type to UNKNOWN */
+ return;
+ }
+
+ /* Extract CFAM id */
+ val >>= 44;
+
+ /* Identify chip */
+ switch(val & 0xff) {
+ case 0xf9:
+ chip->type = PROC_CHIP_P7;
+ assert(proc_gen == proc_gen_p7);
+ break;
+ case 0xe8:
+ chip->type = PROC_CHIP_P7P;
+ assert(proc_gen == proc_gen_p7);
+ break;
+ case 0xef:
+ chip->type = PROC_CHIP_P8_MURANO;
+ assert(proc_gen == proc_gen_p8);
+ break;
+ case 0xea:
+ chip->type = PROC_CHIP_P8_VENICE;
+ assert(proc_gen == proc_gen_p8);
+ break;
+ default:
+ printf("CHIP: Unknown chip type 0x%02x !!!\n",
+ (unsigned char)(val & 0xff));
+ }
+
+ /* Get EC level from CFAM ID */
+ chip->ec_level = ((val >> 16) & 0xf) << 4;
+ chip->ec_level |= (val >> 8) & 0xf;
+}
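
A worked example of the CFAM ID decode, with an illustrative register value:

/*
 * If the 0xf000f register reads as 0x220ea00000000000, then after
 * the 44-bit shift val = 0x220ea and:
 *
 *   0x220ea & 0xff        = 0xea -> PROC_CHIP_P8_VENICE
 *   (0x220ea >> 16) & 0xf = 0x2  -> major EC
 *   (0x220ea >> 8) & 0xf  = 0x0  -> minor EC
 *
 * giving ec_level = 0x20, which xscom_init() prints as "DD2.0".
 */
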
+
+void xscom_init(void)
+{
+ struct dt_node *xn;
+
+ dt_for_each_compatible(dt_root, xn, "ibm,xscom") {
+ uint32_t gcid = dt_get_chip_id(xn);
+ const struct dt_property *reg;
+ struct proc_chip *chip;
+ const char *chip_name;
+ static const char *chip_names[] = {
+ "UNKNOWN", "P7", "P7+", "P8E", "P8",
+ };
+
+ chip = get_chip(gcid);
+ assert(chip);
+
+ /* XXX We need proper address parsing. For now, we just
+ * "know" that we are looking at a u64
+ */
+ reg = dt_find_property(xn, "reg");
+ assert(reg);
+
+ chip->xscom_base = dt_translate_address(xn, 0, NULL);
+
+ /* Grab processor type and EC level */
+ xscom_init_chip_info(chip);
+
+ chip_name = chip->type > PROC_CHIP_P8_VENICE ? "INVALID" :
+ chip_names[chip->type];
+ printf("XSCOM: chip 0x%x at 0x%llx [%s DD%x.%x]\n",
+ gcid, chip->xscom_base,
+ chip_name,
+ chip->ec_level >> 4,
+ chip->ec_level & 0xf);
+ }
+}
+
+void xscom_used_by_console(void)
+{
+ xscom_lock.in_con_path = true;
+}
diff --git a/include/affinity.h b/include/affinity.h
new file mode 100644
index 0000000..eba26db
--- /dev/null
+++ b/include/affinity.h
@@ -0,0 +1,34 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * All functions in charge of generating the associativity/affinity
+ * properties in the device tree.
+ */
+
+#ifndef __AFFINITY_H
+#define __AFFINITY_H
+
+struct dt_node;
+struct cpu_thread;
+
+extern void add_associativity_ref_point(void);
+
+extern void add_chip_dev_associativity(struct dt_node *dev);
+extern void add_core_associativity(struct cpu_thread *cpu);
+
+#endif /* __AFFINITY_H */
diff --git a/include/asm-utils.h b/include/asm-utils.h
new file mode 100644
index 0000000..503f2cc
--- /dev/null
+++ b/include/asm-utils.h
@@ -0,0 +1,45 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ASM_UTILS_H
+#define __ASM_UTILS_H
+
+/*
+ * Do NOT use the immediate load helpers with symbols,
+ * only with constants. Symbols will _not_ be resolved
+ * by the linker since we are building -pie, and will
+ * instead generate relocs of a type our little built-in
+ * relocator can't handle.
+ */
+
+/* Load an immediate 64-bit value into a register */
+#define LOAD_IMM64(r, e) \
+ lis r,(e)@highest; \
+ ori r,r,(e)@higher; \
+ rldicr r,r, 32, 31; \
+ oris r,r, (e)@h; \
+ ori r,r, (e)@l;
+
+/* Load an immediate 32-bit value into a register */
+#define LOAD_IMM32(r, e) \
+ lis r,(e)@h; \
+ ori r,r,(e)@l;
+
+/* Load an address via the TOC */
+#define LOAD_ADDR_FROM_TOC(r, e) ld r,e@got(%r2)
+
+
+#endif /* __ASM_UTILS_H */
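
A worked expansion of LOAD_IMM64 may help; the register and constant are illustrative:

/*
 * LOAD_IMM64(%r3, 0x1122334455667788) expands to:
 *
 *   lis    %r3, 0x1122          # r3 = 0x0000000011220000
 *   ori    %r3, %r3, 0x3344     # r3 = 0x0000000011223344
 *   rldicr %r3, %r3, 32, 31     # r3 = 0x1122334400000000
 *   oris   %r3, %r3, 0x5566     # r3 = 0x1122334455660000
 *   ori    %r3, %r3, 0x7788     # r3 = 0x1122334455667788
 *
 * i.e. @highest/@higher build the top 32 bits, rldicr shifts them
 * into place, and @h/@l fill in the bottom 32 bits.
 */
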
diff --git a/include/ast.h b/include/ast.h
new file mode 100644
index 0000000..17ee17e
--- /dev/null
+++ b/include/ast.h
@@ -0,0 +1,78 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __AST_H
+#define __AST_H
+
+/*
+ * AHB bus registers
+ */
+
+/* SPI Flash controller #1 (BMC) */
+#define BMC_SPI_FCTL_BASE 0x1E620000
+#define BMC_SPI_FCTL_CTRL (BMC_SPI_FCTL_BASE + 0x10)
+#define BMC_FLASH_BASE 0x20000000
+
+/* SPI Flash controller #2 (PNOR) */
+#define PNOR_SPI_FCTL_BASE 0x1E630000
+#define PNOR_SPI_FCTL_CONF (PNOR_SPI_FCTL_BASE + 0x00)
+#define PNOR_SPI_FCTL_CTRL (PNOR_SPI_FCTL_BASE + 0x04)
+#define PNOR_FLASH_BASE 0x30000000
+
+/* LPC registers */
+#define LPC_BASE 0x1e789000
+#define LPC_HICR6 (LPC_BASE + 0x80)
+#define LPC_HICR7 (LPC_BASE + 0x88)
+#define LPC_HICR8 (LPC_BASE + 0x8c)
+
+/*
+ * AHB Accessors
+ */
+#ifndef __SKIBOOT__
+#include "io.h"
+#else
+
+/*
+ * Register accessors; these return byteswapped values
+ * (i.e. LE registers)
+ */
+void ast_ahb_writel(uint32_t val, uint32_t reg);
+uint32_t ast_ahb_readl(uint32_t reg);
+
+/*
+ * copy to/from accessors. Cannot cross IDSEL boundaries (256M)
+ */
+int ast_copy_to_ahb(uint32_t reg, const void *src, uint32_t len);
+int ast_copy_from_ahb(void *dst, uint32_t reg, uint32_t len);
+
+void ast_io_init(void);
+
+/* UART init */
+void ast_setup_uart1(uint16_t io_base, uint8_t irq);
+
+#endif /* __SKIBOOT__ */
+
+/*
+ * SPI Flash controllers
+ */
+#define AST_SF_TYPE_PNOR 0
+#define AST_SF_TYPE_BMC 1
+
+struct spi_flash_ctrl;
+int ast_sf_open(uint8_t type, struct spi_flash_ctrl **ctrl);
+void ast_sf_close(struct spi_flash_ctrl *ctrl);
+
+
+#endif /* __AST_H */
diff --git a/include/bitutils.h b/include/bitutils.h
new file mode 100644
index 0000000..7e5b6eb
--- /dev/null
+++ b/include/bitutils.h
@@ -0,0 +1,50 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BITUTILS_H
+#define __BITUTILS_H
+
+/* PPC bit number conversion */
+#ifdef __ASSEMBLY__
+#define PPC_BIT(bit) (0x8000000000000000 >> (bit))
+#define PPC_BIT32(bit) (0x80000000 >> (bit))
+#define PPC_BIT8(bit) (0x80 >> (bit))
+#else
+#define PPC_BIT(bit) (0x8000000000000000UL >> (bit))
+#define PPC_BIT32(bit) (0x80000000UL >> (bit))
+#define PPC_BIT8(bit) (0x80UL >> (bit))
+#endif
+#define PPC_BITMASK(bs,be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK32(bs,be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+#define PPC_BITLSHIFT(be) (63 - (be))
+#define PPC_BITLSHIFT32(be) (31 - (be))
+
+/*
+ * PPC bitmask field manipulation
+ */
+
+/* Extract field fname from val */
+#define GETFIELD(fname, val) \
+ (((val) & fname##_MASK) >> fname##_LSH)
+
+/* Set field fname of oval to fval
+ * NOTE: oval isn't modified, the combined result is returned
+ */
+#define SETFIELD(fname, oval, fval) \
+ (((oval) & ~fname##_MASK) | \
+ ((((typeof(oval))(fval)) << fname##_LSH) & fname##_MASK))
+
+#endif /* __BITUTILS_H */
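
Since the IBM convention (bit 0 is the MSB) trips people up, here is a worked example; the field matches the XSCOM_DATA_IND_ERR definition in hw/xscom.c, and the EXAMPLE_* names are purely illustrative:

/* A field covering PPC bits 33..35 of a 64-bit register */
#define EXAMPLE_ERR_MASK	PPC_BITMASK(33, 35)	/* = 0x0000000070000000 */
#define EXAMPLE_ERR_LSH		PPC_BITLSHIFT(35)	/* = 63 - 35 = 28 */

/* GETFIELD(EXAMPLE_ERR, 0x0000000030000000ul) == 0x3 */
/* SETFIELD(EXAMPLE_ERR, 0ul, 0x5ul)           == 0x0000000050000000ul */
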
diff --git a/include/capp.h b/include/capp.h
new file mode 100644
index 0000000..ddf280d
--- /dev/null
+++ b/include/capp.h
@@ -0,0 +1,62 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+struct capp_ucode_lid_hdr {
+ uint64_t eyecatcher; /* 'CAPPULID' in ASCII */
+ uint64_t version;
+ uint64_t data_size; /* total size of all capp microcode data following header */
+ u8 reserved[40]; /* zeroed, pads to 64 byte boundary */
+};
+
+struct capp_ucode_data_hdr
+{
+ uint64_t eyecatcher; /* 'CAPPUCOD' in ASCII */
+ u8 version;
+ u8 reg;
+ u8 reserved[2];
+ uint32_t num_data_chunks; /* Number of 8-byte chunks of data that follow this header */
+};
+
+enum capp_reg {
+ apc_master_cresp = 0x1,
+ apc_master_uop_table = 0x2,
+ snp_ttype = 0x3,
+ snp_uop_table = 0x4,
+ apt_master_capi_ctrl = 0x5,
+ snoop_capi_cnfg = 0x6,
+ canned_presp_map0 = 0x7,
+ canned_presp_map1 = 0x8,
+ canned_presp_map2 = 0x9,
+ flush_sue_state_map = 0xA,
+ apc_master_powerbus_ctrl = 0xB
+};
+
+#define CAPP_SNP_ARRAY_ADDR_REG 0x2013028
+#define CAPP_APC_MASTER_ARRAY_ADDR_REG 0x201302A
+#define CAPP_SNP_ARRAY_WRITE_REG 0x2013801
+#define CAPP_APC_MASTER_ARRAY_WRITE_REG 0x2013802
+
+#define APC_MASTER_PB_CTRL 0x2013018
+#define APC_MASTER_CONFIG 0x2013019
+#define TRANSPORT_CONTROL 0x201301C
+#define CANNED_PRESP_MAP0 0x201301D
+#define CANNED_PRESP_MAP1 0x201301E
+#define CANNED_PRESP_MAP2 0x201301F
+#define CAPP_ERR_STATUS_CTRL 0x201300E
+#define FLUSH_SUE_STATE_MAP 0x201300F
+#define CAPP_EPOCH_TIMER_CTRL 0x201302C
+#define FLUSH_UOP_CONFIG1 0x2013803
+#define FLUSH_UOP_CONFIG2 0x2013804
+#define SNOOP_CAPI_CONFIG 0x201301A
diff --git a/include/cec.h b/include/cec.h
new file mode 100644
index 0000000..87cdc0e
--- /dev/null
+++ b/include/cec.h
@@ -0,0 +1,51 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CEC_H
+#define __CEC_H
+
+#include <stdint.h>
+
+/* This represents an IO Hub and contains the function pointers
+ * for the IO-Hub-related OPAL ops and other internal functions
+ */
+
+struct io_hub;
+
+struct io_hub_ops {
+ /* OPAL_PCI_SET_HUB_TCE_MEMORY (p5ioc2 only) */
+ int64_t (*set_tce_mem)(struct io_hub *hub, uint64_t address,
+ uint64_t size);
+
+ /* OPAL_PCI_GET_HUB_DIAG_DATA */
+ int64_t (*get_diag_data)(struct io_hub *hub, void *diag_buffer,
+ uint64_t diag_buffer_len);
+
+ /* Called on fast reset */
+ void (*reset)(struct io_hub *hub);
+};
+
+struct io_hub {
+ uint32_t hub_id;
+ const struct io_hub_ops *ops;
+};
+
+extern struct io_hub *cec_get_hub_by_id(uint32_t hub_id);
+
+extern void cec_reset(void);
+extern void cec_register(struct io_hub *hub);
+
+#endif /* __CEC_H */
diff --git a/include/centaur.h b/include/centaur.h
new file mode 100644
index 0000000..4a5e52c
--- /dev/null
+++ b/include/centaur.h
@@ -0,0 +1,24 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CENTAUR_H
+#define __CENTAUR_H
+
+extern int64_t centaur_xscom_read(uint32_t id, uint64_t pcb_addr, uint64_t *val);
+extern int64_t centaur_xscom_write(uint32_t id, uint64_t pcb_addr, uint64_t val);
+extern void centaur_init(void);
+
+#endif /* __CENTAUR_H */
diff --git a/include/chip.h b/include/chip.h
new file mode 100644
index 0000000..10623e6
--- /dev/null
+++ b/include/chip.h
@@ -0,0 +1,151 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CHIP_H
+#define __CHIP_H
+
+#include <stdint.h>
+#include <lock.h>
+
+/*
+ * Note on chip IDs:
+ *
+ * We carry a "chip_id" around, in the cpu_thread, but also as
+ * ibm,chip-id properties.
+ *
+ * This ID is the HW fabric ID of a chip based on the XSCOM numbering,
+ * also known as "GCID" (Global Chip ID).
+ *
+ * The format of this number is different between P7 and P8 and care must
+ * be taken when trying to convert between this chip ID and some other
+ * representation such as PIR values, interrupt-server numbers etc... :
+ *
+ * P7 GCID
+ * -------
+ *
+ * Global chip ID is a 6 bit number:
+ *
+ * NodeID T ChipID
+ * | | | |
+ * |___|___|___|___|___|___|
+ *
+ * Where T is the "torrent" bit and is 0 for P7 chips and 1 for
+ * directly XSCOM'able IO chips such as Torrent
+ *
+ * This macro converts a PIR to a GCID
+ */
+#define P7_PIR2GCID(pir) ({ \
+ uint32_t _pir = pir; \
+ ((_pir >> 4) & 0x38) | ((_pir >> 5) & 0x3); })
+
+#define P7_PIR2COREID(pir) (((pir) >> 2) & 0x7)
+
+#define P7_PIR2THREADID(pir) ((pir) & 0x3)
+
+/*
+ * P8 GCID
+ * -------
+ *
+ * Global chip ID is a 6 bit number:
+ *
+ * NodeID ChipID
+ * | | |
+ * |___|___|___|___|___|___|
+ *
+ * The difference from P7 is the absence of the T bit; the ChipID
+ * is 3 bits long. The GCID is thus the same as the high bits
+ * of the PIR
+ */
+#define P8_PIR2GCID(pir) (((pir) >> 7) & 0x3f)
+
+#define P8_PIR2COREID(pir) (((pir) >> 3) & 0xf)
+
+#define P8_PIR2THREADID(pir) ((pir) & 0x7)
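
A worked decode with an illustrative PIR value:

/*
 * On P8, PIR 0x1a3 decodes as:
 *
 *   P8_PIR2GCID(0x1a3)     = (0x1a3 >> 7) & 0x3f = 3   (chip 3)
 *   P8_PIR2COREID(0x1a3)   = (0x1a3 >> 3) & 0xf  = 4   (core 4)
 *   P8_PIR2THREADID(0x1a3) = 0x1a3 & 0x7         = 3   (thread 3)
 */
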
+
+struct dt_node;
+struct centaur_chip;
+
+/* Chip type */
+enum proc_chip_type {
+ PROC_CHIP_UNKNOWN,
+ PROC_CHIP_P7,
+ PROC_CHIP_P7P,
+ PROC_CHIP_P8_MURANO,
+ PROC_CHIP_P8_VENICE,
+};
+
+#define MAX_CHIPS (1 << 6) /* 6-bit chip ID */
+
+/*
+ * For each chip in the system, we maintain this structure
+ *
+ * This contains fields used by different modules including
+ * modules in hw/ but is handy to keep per-chip data
+ */
+struct proc_chip {
+ uint32_t id; /* HW Chip ID (GCID) */
+ struct dt_node *devnode; /* "xscom" chip node */
+
+ /* These are only initialized after xscom_init */
+ enum proc_chip_type type;
+ uint32_t ec_level; /* 0xMm (DD1.0 = 0x10) */
+
+ /* Those two values are only populated on machines with an FSP
+ * dbob_id = Drawer/Block/Octant/Blade (DBOBID)
+ * pcid = HDAT processor_chip_id
+ */
+ uint32_t dbob_id;
+ uint32_t pcid;
+
+ /* Used by hw/xscom.c */
+ uint64_t xscom_base;
+
+ /* Used by hw/lpc.c */
+ uint32_t lpc_xbase;
+ struct lock lpc_lock;
+ uint8_t lpc_fw_idsel;
+ uint8_t lpc_fw_rdsz;
+
+ /* Used by hw/slw.c */
+ uint64_t slw_base;
+ uint64_t slw_bar_size;
+ uint64_t slw_image_size;
+
+ /* Used by hw/homer.c */
+ uint64_t homer_base;
+ uint64_t homer_size;
+ uint64_t occ_common_base;
+ uint64_t occ_common_size;
+
+ /* Used by hw/centaur.c */
+ struct centaur_chip *centaurs;
+};
+
+extern uint32_t pir_to_chip_id(uint32_t pir);
+extern uint32_t pir_to_core_id(uint32_t pir);
+extern uint32_t pir_to_thread_id(uint32_t pir);
+
+extern struct proc_chip *next_chip(struct proc_chip *chip);
+
+#define for_each_chip(__c) for (__c=next_chip(NULL); __c; __c=next_chip(__c))
+
+extern struct proc_chip *get_chip(uint32_t chip_id);
+
+extern void init_chips(void);
+
+
+#endif /* __CHIP_H */
+
diff --git a/include/chiptod.h b/include/chiptod.h
new file mode 100644
index 0000000..ef34927
--- /dev/null
+++ b/include/chiptod.h
@@ -0,0 +1,28 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CHIPTOD_H
+#define __CHIPTOD_H
+
+/* The ChipTOD is the HW facility that maintains a synchronized
+ * time base across the fabric.
+ */
+
+extern void chiptod_init(u32 master_cpu);
+
+extern bool chiptod_wakeup_resync(void);
+
+#endif /* __CHIPTOD_H */
diff --git a/include/codeupdate.h b/include/codeupdate.h
new file mode 100644
index 0000000..04f5471
--- /dev/null
+++ b/include/codeupdate.h
@@ -0,0 +1,236 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __CODEUPDATE_H
+#define __CODEUPDATE_H
+
+/* Flash SG list version */
+#define SG_LIST_VERSION (1UL)
+
+/* LID size <= 16M */
+#define LID_MAX_SIZE 0x1000000
+
+/* Delete all LIDs in */
+#define DEL_UPD_SIDE_LIDS 0xFFFFFFFF
+
+/* System parameter values used in code update validation */
+#define INBAND_UPDATE_ALLOWED 0x01
+#define PLATFORM_HMC_MANAGED 0x01
+#define FW_LICENSE_ACCEPT 0x01
+
+/* Running image side */
+#define FW_IPL_SIDE_TEMP 0x01
+#define FW_IPL_SIDE_PERM 0x00
+
+/* Manage operations */
+#define OPAL_REJECT_TMP_SIDE 0
+#define OPAL_COMMIT_TMP_SIDE 1
+
+/* Validate image size */
+#define VALIDATE_BUF_SIZE 4096
+
+/* Code update operation status */
+#define OPAL_INVALID_IMAGE -1003 /* Unacceptable image */
+#define OPAL_ACTIVE_SIDE_ERR -9001
+#define OPAL_FLASH_NO_AUTH -9002
+
+/* Validate image update result tokens */
+#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
+#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
+#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
+#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
+/*
+ * Current T side will be committed to P side before being replaced with the
+ * new image, and the new image is downlevel from the current image
+ */
+#define VALIDATE_TMP_COMMIT_DL 4
+/*
+ * Current T side will be committed to P side before being replaced with new
+ * image
+ */
+#define VALIDATE_TMP_COMMIT 5
+/*
+ * T side will be updated with a downlevel image
+ */
+#define VALIDATE_TMP_UPDATE_DL 6
+/*
+ * The candidate image's release date is later than the system's firmware
+ * service entitlement date - service warranty period has expired
+ */
+#define VALIDATE_OUT_OF_WRNTY 7
+
+/* default version */
+#define FW_VERSION_UNKNOWN "UNKNOWN"
+
+/* Actual size of MI & ML keyword including NULL */
+#define MI_KEYWORD_SIZE 10
+#define ML_KEYWORD_SIZE 9
+
+/* Firmware image VPD data */
+struct fw_image_vpd {
+ char MI_keyword[MI_KEYWORD_SIZE]; /* NNSSS_FFF */
+ char ext_fw_id[ML_KEYWORD_SIZE]; /* FWxxx.yy */
+};
+
+/* Master LID header */
+struct master_lid_header {
+ char key[3]; /* "MLH" */
+ uint8_t version; /* 0x02 */
+ uint16_t headerSize;
+ uint16_t entrySize;
+ uint8_t reserved[56];
+};
+
+/* LID index entry */
+struct lid_index_entry {
+ uint32_t id;
+ uint32_t size;
+ uint32_t offset;
+ uint32_t crc;
+};
+
+/* SP flags */
+#define FW_ONE_OFF_SP 0x80000000
+#define FW_EMERGENCY_SP 0x40000000
+
+/*
+ * SP GA date
+ *
+ * sp_flag addr = header->data + header->ext_fw_id_size
+ */
+struct update_image_ga_date {
+ uint32_t sp_flag;
+ char sp_ga_date[8]; /* YYYYMMDD */
+};
+
+/* Image magic number */
+#define IMAGE_MAGIC_NUMBER 0x5549
+
+/* Image header structure */
+struct update_image_header {
+ uint16_t magic;
+ uint16_t version;
+ uint32_t package_size;
+ uint32_t crc;
+ uint16_t lid_index_offset;
+ uint16_t number_lids;
+ uint16_t package_flags;
+ uint16_t MI_keyword_size;
+ char MI_keyword_data[40];
+ uint16_t ext_fw_id_size;
+ /* Rest of the image data including ext fw id, sp flags */
+ char data[];
+};
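
Given the offset rule noted above for the SP GA date, a hypothetical helper (a sketch only, not part of this interface) would locate it as:

static inline struct update_image_ga_date *
image_ga_date(struct update_image_header *h)
{
	/* sp_flag addr = header->data + header->ext_fw_id_size */
	return (struct update_image_ga_date *)(h->data + h->ext_fw_id_size);
}
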
+
+/* FipS header */
+struct fips_header {
+ uint16_t magic;
+ uint16_t version;
+ uint32_t lid_id;
+ uint32_t lid_date; /* YYYYMMDD */
+ uint16_t lid_time; /* HHMM */
+ uint16_t lid_class;
+ uint32_t crc;
+ uint32_t lid_size; /* Number of bytes below header */
+ uint32_t header_size;
+ uint8_t mtd_number;
+ uint8_t valid; /* 1 = valid, 0 = invalid */
+ uint8_t reserved;
+ uint8_t lid_info_size;
+ char lid_info[64]; /* code level */
+ uint32_t update_date; /* YYYYMMDD */
+ uint16_t update_time; /* HHMM */
+ uint16_t phylum_len;
+ uint8_t lid_phylum[];
+};
+
+/* Approximate LID size */
+#define MASTER_LID_SIZE 0x5000
+/*
+ * Note:
+ * The doc indicates non-SP LIDs are 0-8MB in size. However,
+ * in reality the marker LID is less than 4k, so allocate
+ * 8k to give some breathing space.
+ */
+#define MARKER_LID_SIZE 0x00002000
+
+/* Common marker LID numbers */
+#define P_COM_MARKER_LID_ID 0x80A00001
+#define T_COM_MARKER_LID_ID (P_COM_MARKER_LID_ID | ADJUST_T_SIDE_LID_NO)
+
+/*
+ * Common marker LID structure
+ *
+ * Note that we are populating only required sections,
+ * not all ADF sections in common marker LID.
+ */
+struct com_marker_header {
+ uint32_t version;
+ uint32_t MI_offset; /* Offset to MI section */
+ uint32_t iseries_offset;
+};
+
+/* MI Keyword section */
+struct com_marker_mi_section {
+ uint32_t MI_size;
+ char MI_keyword[40]; /* MI Keyword */
+ char lst_disrupt_fix_lvl[3];
+ char skip[21]; /* Skip fields we are not interested in */
+ uint32_t adf_offset; /* Offset to ADF section */
+};
+
+/* Additional Data Fields */
+struct com_marker_adf_sec {
+ uint32_t adf_cnt; /* ADF count */
+ char adf_data[]; /* ADF data */
+};
+
+/* ADF common header */
+struct com_marker_adf_header {
+ uint32_t size; /* Section size */
+ uint32_t name; /* Section name */
+};
+
+/*
+ * Service Pack Nomenclature ADF
+ *
+ * Service pack release name.
+ */
+#define ADF_NAME_SP 0x53504E4D /* SPNM */
+struct com_marker_adf_sp
+{
+ struct com_marker_adf_header header;
+ uint32_t sp_name_offset; /* Offset from start of ADF */
+ uint32_t sp_name_size;
+ uint32_t skip[4]; /* Skip rest of fields */
+};
+
+/*
+ * Firmware IP Protection ADF
+ *
+ * Service Pack flags and GA date.
+ */
+#define ADF_NAME_FW_IP 0x46495050 /* FIPP */
+struct com_marker_fw_ip {
+ struct com_marker_adf_header header;
+ uint32_t sp_flag_offset; /* Offset from start of ADF */
+ uint32_t sp_flag_size;
+ uint32_t sp_ga_offset; /* Offset from start of ADF*/
+ uint32_t sp_ga_size;
+};
+
+#endif /* __CODEUPDATE_H */
diff --git a/include/compiler.h b/include/compiler.h
new file mode 100644
index 0000000..c1557d8
--- /dev/null
+++ b/include/compiler.h
@@ -0,0 +1,50 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __COMPILER_H
+#define __COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#include <stddef.h>
+
+/* Macros for various compiler bits and pieces */
+#define __packed __attribute__((packed))
+#define __align(x) __attribute__((__aligned__(x)))
+#define __unused __attribute__((unused))
+#define __used __attribute__((used))
+#define __section(x) __attribute__((__section__(x)))
+#define __noreturn __attribute__((noreturn))
+/* not __const as this has a different meaning (const) */
+#define __attrconst __attribute__((const))
+
+#if 0 /* Provided by gcc stddef.h */
+#define offsetof(type,m) __builtin_offsetof(type,m)
+#endif
+
+/* Compiler barrier */
+static inline void barrier(void)
+{
+ asm volatile("" : : : "memory");
+}
+
+#endif /* __ASSEMBLY__ */
+
+/* Stringification macro */
+#define __tostr(x) #x
+#define tostr(x) __tostr(x)
+
+#endif /* __COMPILER_H */
diff --git a/include/config.h b/include/config.h
new file mode 100644
index 0000000..78b63fe
--- /dev/null
+++ b/include/config.h
@@ -0,0 +1,93 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONFIG_H
+#define __CONFIG_H
+
+#define HAVE_TYPEOF 1
+#define HAVE_BUILTIN_TYPES_COMPATIBLE_P 1
+
+/* Keep -Wundef happy by defining whatever isn't on commandline to 0 */
+#if defined(HAVE_LITTLE_ENDIAN) && HAVE_LITTLE_ENDIAN
+#define HAVE_BIG_ENDIAN 0
+#endif
+#if defined(HAVE_BIG_ENDIAN) && HAVE_BIG_ENDIAN
+#define HAVE_LITTLE_ENDIAN 0
+#endif
+
+/* We don't have a byteswap.h, and thus no bswap_64 */
+#define HAVE_BYTESWAP_H 0
+#define HAVE_BSWAP_64 0
+
+/*
+ * Build options.
+ */
+
+/* Enable lock debugging */
+#define DEBUG_LOCKS 1
+
+/* Enable malloc debugging */
+#define DEBUG_MALLOC 1
+
+/* Enable OPAL entry point tracing */
+//#define OPAL_TRACE_ENTRY 1
+
+/* Enable tracing of event state change */
+//#define OPAL_TRACE_EVT_CHG 1
+
+/* Enable various levels of OPAL_console debug */
+//#define OPAL_DEBUG_CONSOLE_IO 1
+//#define OPAL_DEBUG_CONSOLE_POLL 1
+
+/* Enable this for mambo console */
+//#define MAMBO_CONSOLE 1
+
+/* Enable this to hookup SkiBoot log to the DVS console */
+#define DVS_CONSOLE 1
+
+/* Enable this to force the dummy console to the kernel.
+ * (ie, an OPAL console that injects into skiboot own console)
+ * Where possible, leave this undefined and enable it dynamically using
+ * the chosen->sapphire,enable-dummy-console in the device tree.
+ *
+ * Note: This only gets enabled if there is no FSP console. If there
+ * is one, it always takes over for now. This also causes the LPC UART
+ * node to be marked "reserved" so Linux doesn't instantiate an 8250
+ * driver for it.
+ */
+//#define FORCE_DUMMY_CONSOLE 1
+
+/* Enable this to do fast resets. Currently unreliable... */
+//#define ENABLE_FAST_RESET 1
+
+/* Enable this to make fast reboot clear memory */
+//#define FAST_REBOOT_CLEARS_MEMORY 1
+
+/* Enable this to disable setting of the output pending event when
+ * sending things on the console. The FSP is very slow to consume
+ * and older kernels wait after each character during early boot so
+ * things get very slow. Eventually, we may want to create an OPAL
+ * API for the kernel to activate or deactivate that functionality
+ */
+#define DISABLE_CON_PENDING_EVT 1
+
+/* Configure this to provide some additional kernel command line
+ * arguments to the bootloader
+ */
+//#define KERNEL_COMMAND_LINE "debug"
+
+#endif /* __CONFIG_H */
+
diff --git a/include/console.h b/include/console.h
new file mode 100644
index 0000000..791f770
--- /dev/null
+++ b/include/console.h
@@ -0,0 +1,68 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONSOLE_H
+#define __CONSOLE_H
+
+#include <lock.h>
+
+/*
+ * Our internal console uses the format of BML new-style in-memory
+ * console and supports input for setups without a physical console
+ * facility or FSP.
+ *
+ * (This is v3 of the format, the previous one sucked)
+ */
+struct memcons {
+ uint64_t magic;
+#define MEMCONS_MAGIC 0x6630696567726173
+ uint64_t obuf_phys;
+ uint64_t ibuf_phys;
+ uint32_t obuf_size;
+ uint32_t ibuf_size;
+ uint32_t out_pos;
+#define MEMCONS_OUT_POS_WRAP 0x80000000u
+#define MEMCONS_OUT_POS_MASK 0x00ffffffu
+ uint32_t in_prod;
+ uint32_t in_cons;
+};
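
As a sketch of how a consumer would interpret out_pos (assuming, as the flag names suggest, that MEMCONS_OUT_POS_WRAP means the buffer has gone around at least once; this is host-side code using standard C I/O with obuf_phys already mapped, so it needs stdio.h):

static void memcons_dump(const struct memcons *mc, const char *obuf)
{
	uint32_t pos = mc->out_pos & MEMCONS_OUT_POS_MASK;

	if (mc->out_pos & MEMCONS_OUT_POS_WRAP) {
		/* Oldest data starts right after the write position */
		fwrite(obuf + pos, 1, mc->obuf_size - pos, stdout);
		fwrite(obuf, 1, pos, stdout);
	} else {
		fwrite(obuf, 1, pos, stdout);
	}
}
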
+
+extern struct memcons memcons;
+
+#define INMEM_CON_IN_LEN 16
+#define INMEM_CON_OUT_LEN (INMEM_CON_LEN - INMEM_CON_IN_LEN)
+
+/* Console driver */
+struct con_ops {
+ size_t (*write)(const char *buf, size_t len);
+ size_t (*read)(char *buf, size_t len);
+};
+
+extern struct lock con_lock;
+
+extern bool dummy_console_enabled(void);
+extern void force_dummy_console(void);
+extern bool flush_console(void);
+extern bool __flush_console(void);
+extern void set_console(struct con_ops *driver);
+
+extern void clear_console(void);
+extern void memcons_add_properties(void);
+extern void dummy_console_add_nodes(void);
+
+extern bool uart_console_poll(void);
+
+#endif /* __CONSOLE_H */
diff --git a/include/cpu.h b/include/cpu.h
new file mode 100644
index 0000000..7ea88b6
--- /dev/null
+++ b/include/cpu.h
@@ -0,0 +1,207 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CPU_H
+#define __CPU_H
+
+#include <processor.h>
+#include <ccan/list/list.h>
+#include <lock.h>
+#include <device.h>
+#include <opal.h>
+
+/*
+ * cpu_thread is our internal structure representing each
+ * thread in the system
+ */
+
+enum cpu_thread_state {
+ cpu_state_no_cpu = 0, /* Nothing there */
+ cpu_state_unknown, /* In PACA, not called in yet */
+ cpu_state_unavailable, /* Not available */
+ cpu_state_present, /* Assumed to spin in asm entry */
+ cpu_state_active, /* Secondary called in */
+ cpu_state_os, /* Under OS control */
+ cpu_state_disabled, /* Disabled by us due to error */
+ cpu_state_rvwinkle, /* Doing an rvwinkle cycle */
+};
+
+struct cpu_job;
+
+struct cpu_thread {
+ uint32_t pir;
+ uint32_t server_no;
+ uint32_t chip_id;
+ bool is_secondary;
+ bool current_hile;
+ struct cpu_thread *primary;
+ enum cpu_thread_state state;
+ struct dt_node *node;
+ struct opal_machine_check_event mc_event;
+ struct trace_info *trace;
+ uint64_t save_r1;
+ void *icp_regs;
+ uint32_t con_suspend;
+ bool con_need_flush;
+ uint32_t hbrt_spec_wakeup; /* primary only */
+
+ struct lock job_lock;
+ struct list_head job_queue;
+};
+
+/* This global is set to 1 to allow secondaries to call in,
+ * typically set after the primary has allocated the cpu_thread
+ * array and stacks
+ */
+extern unsigned long cpu_secondary_start;
+
+/* Max PIR in the system */
+extern unsigned int cpu_max_pir;
+
+/* Max # of threads per core */
+extern unsigned int cpu_thread_count;
+
+/* Boot CPU. */
+extern struct cpu_thread *boot_cpu;
+
+/* Initialize CPUs */
+void pre_init_boot_cpu(void);
+void init_boot_cpu(void);
+void init_all_cpus(void);
+
+/* This brings up our secondaries */
+extern void cpu_bringup(void);
+
+/* This is called by secondaries as they call in */
+extern void cpu_callin(struct cpu_thread *cpu);
+
+/* For cpus which fail to call in. */
+extern void cpu_remove_node(const struct cpu_thread *t);
+
+/* Find CPUs using different methods */
+extern struct cpu_thread *find_cpu_by_chip_id(u32 chip_id);
+extern struct cpu_thread *find_cpu_by_node(struct dt_node *cpu);
+extern struct cpu_thread *find_cpu_by_server(u32 server_no);
+extern struct cpu_thread *find_cpu_by_pir(u32 pir);
+
+extern struct dt_node *get_cpu_node(u32 pir);
+
+/* Iterator */
+extern struct cpu_thread *first_cpu(void);
+extern struct cpu_thread *next_cpu(struct cpu_thread *cpu);
+
+/* WARNING: CPUs that have been picked up by the OS no longer
+ * appear as available and cannot have jobs scheduled
+ * on them. Essentially that means that after the OS is
+ * fully started, all CPUs are seen as unavailable from
+ * this API standpoint.
+ */
+
+static inline bool cpu_is_available(struct cpu_thread *cpu)
+{
+ return cpu->state == cpu_state_active ||
+ cpu->state == cpu_state_rvwinkle;
+}
+
+extern struct cpu_thread *first_available_cpu(void);
+extern struct cpu_thread *next_available_cpu(struct cpu_thread *cpu);
+
+#define for_each_cpu(cpu) \
+ for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu))
+
+#define for_each_available_cpu(cpu) \
+ for (cpu = first_available_cpu(); cpu; cpu = next_available_cpu(cpu))
+
+extern struct cpu_thread *first_available_core_in_chip(u32 chip_id);
+extern struct cpu_thread *next_available_core_in_chip(struct cpu_thread *cpu, u32 chip_id);
+
+#define for_each_available_core_in_chip(core, chip_id) \
+ for (core = first_available_core_in_chip(chip_id); core; \
+ core = next_available_core_in_chip(core, chip_id))
+
+/* Return the caller CPU (only after init_cpu_threads) */
+register struct cpu_thread *__this_cpu asm("r13");
+static inline struct cpu_thread *this_cpu(void)
+{
+ return __this_cpu;
+}
+
+/* Get the thread # of a cpu within the core */
+static inline uint32_t cpu_get_thread_index(struct cpu_thread *cpu)
+{
+ return cpu->pir - cpu->primary->pir;
+}
+
+/* Get the core # of a cpu within the chip */
+extern uint32_t cpu_get_core_index(struct cpu_thread *cpu);
+
+/* Get the PIR of thread 0 of the same core */
+static inline uint32_t cpu_get_thread0(struct cpu_thread *cpu)
+{
+ return cpu->primary->pir;
+}
+
+static inline bool cpu_is_thread0(struct cpu_thread *cpu)
+{
+ return cpu->primary == cpu;
+}
+
+static inline bool cpu_is_sibling(struct cpu_thread *cpu1,
+ struct cpu_thread *cpu2)
+{
+ return cpu1->primary == cpu2->primary;
+}
+
+/* Called when some error condition requires disabling a core */
+void cpu_disable_all_threads(struct cpu_thread *cpu);
+
+/* Allocate & queue a job on target CPU */
+extern struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
+ void (*func)(void *data), void *data,
+ bool no_return);
+
+static inline struct cpu_job *cpu_queue_job(struct cpu_thread *cpu,
+ void (*func)(void *data),
+ void *data)
+{
+ return __cpu_queue_job(cpu, func, data, false);
+}
+
+
+/* Poll job status, returns true if completed */
+extern bool cpu_poll_job(struct cpu_job *job);
+
+/* Synchronously wait for a job to complete; this will
+ * continue handling the FSP mailbox if called from the
+ * boot CPU. Set free_it to free it automatically.
+ */
+extern void cpu_wait_job(struct cpu_job *job, bool free_it);
+
+/* Free a CPU job, only call on a completed job */
+extern void cpu_free_job(struct cpu_job *job);
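
A usage sketch tying these together; do_work and run_job_example are illustrative, caller-provided names:

static void do_work(void *data)
{
	printf("job ran on PIR 0x%x, data=%p\n", this_cpu()->pir, data);
}

static void run_job_example(struct cpu_thread *target, void *data)
{
	struct cpu_job *job = cpu_queue_job(target, do_work, data);

	if (job)
		cpu_wait_job(job, true); /* free_it: the job is freed for us */
}
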
+
+/* Called by init to process jobs */
+extern void cpu_process_jobs(void);
+
+static inline void cpu_give_self_os(void)
+{
+ __this_cpu->state = cpu_state_os;
+}
+
+extern void *cpu_stack_bottom(unsigned int pir);
+extern void *cpu_stack_top(unsigned int pir);
+
+#endif /* __CPU_H */
diff --git a/include/device.h b/include/device.h
new file mode 100644
index 0000000..65ceef4
--- /dev/null
+++ b/include/device.h
@@ -0,0 +1,233 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DEVICE_H
+#define __DEVICE_H
+#include <ccan/list/list.h>
+#include <ccan/short_types/short_types.h>
+
+/* Any property or node with this prefix will not be passed to the kernel. */
+#define DT_PRIVATE "skiboot,"
+
+/*
+ * An in-memory representation of a node in the device tree.
+ *
+ * This is trivially flattened into an fdt.
+ *
+ * Note that the add_* routines will make a copy of the name if it's not
+ * a read-only string (ie. usually a string literal).
+ */
+struct dt_property {
+ struct list_node list;
+ const char *name;
+ size_t len;
+ char prop[/* len */];
+};
+
+struct dt_node {
+ const char *name;
+ struct list_node list;
+ struct list_head properties;
+ struct list_head children;
+ struct dt_node *parent;
+ u32 phandle;
+};
+
+/* This is shared with device_tree.c ... make it static when
+ * the latter is gone (hopefully soon)
+ */
+extern u32 last_phandle;
+
+extern struct dt_node *dt_root;
+extern struct dt_node *dt_chosen;
+
+/* Create a root node: ie. a parentless one. */
+struct dt_node *dt_new_root(const char *name);
+
+/* Graft a root node into this tree. */
+bool dt_attach_root(struct dt_node *parent, struct dt_node *root);
+
+/* Add a child node. */
+struct dt_node *dt_new(struct dt_node *parent, const char *name);
+struct dt_node *dt_new_addr(struct dt_node *parent, const char *name,
+ uint64_t unit_addr);
+struct dt_node *dt_new_2addr(struct dt_node *parent, const char *name,
+ uint64_t unit_addr0, uint64_t unit_addr1);
+
+/* Add a property node, various forms. */
+struct dt_property *dt_add_property(struct dt_node *node,
+ const char *name,
+ const void *val, size_t size);
+struct dt_property *dt_add_property_string(struct dt_node *node,
+ const char *name,
+ const char *value);
+struct dt_property *dt_add_property_nstr(struct dt_node *node,
+ const char *name,
+ const char *value, unsigned int vlen);
+
+/* Given out enough GCC extensions, we will achieve enlightenment! */
+#define dt_add_property_strings(node, name, ...) \
+ __dt_add_property_strings((node), ((name)), \
+ sizeof((const char *[]) { __VA_ARGS__ })/sizeof(const char *), \
+ __VA_ARGS__)
+
+struct dt_property *__dt_add_property_strings(struct dt_node *node,
+ const char *name,
+ int count, ...);
+
+/* Given out enough GCC extensions, we will achieve enlightenment! */
+#define dt_add_property_cells(node, name, ...) \
+ __dt_add_property_cells((node), ((name)), \
+ sizeof((u32[]) { __VA_ARGS__ })/sizeof(u32), \
+ __VA_ARGS__)
+
+struct dt_property *__dt_add_property_cells(struct dt_node *node,
+ const char *name,
+ int count, ...);
+
+#define dt_add_property_u64s(node, name, ...) \
+ __dt_add_property_u64s((node), ((name)), \
+ sizeof((u64[]) { __VA_ARGS__ })/sizeof(u64), \
+ __VA_ARGS__)
+
+struct dt_property *__dt_add_property_u64s(struct dt_node *node,
+ const char *name,
+ int count, ...);
+
+static inline struct dt_property *dt_add_property_u64(struct dt_node *node,
+ const char *name, u64 val)
+{
+ return dt_add_property_cells(node, name, (u32)(val >> 32), (u32)val);
+}
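
A usage sketch of the property helpers; the node name, addresses and property values are illustrative:

static inline void dt_example_node(void)
{
	struct dt_node *n = dt_new_addr(dt_root, "example", 0x1000);

	dt_add_property_string(n, "compatible", "vendor,example");
	dt_add_property_cells(n, "reg", 0x1000, 0x100);
	dt_add_property_u64(n, "some-base", 0x200000000ull);
}
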
+
+void dt_del_property(struct dt_node *node, struct dt_property *prop);
+
+/* Warning: moves *prop! */
+void dt_resize_property(struct dt_property **prop, size_t len);
+
+u32 dt_property_get_cell(const struct dt_property *prop, u32 index);
+
+/* First child of this node. */
+struct dt_node *dt_first(const struct dt_node *root);
+
+/* Return next node, or NULL. */
+struct dt_node *dt_next(const struct dt_node *root, const struct dt_node *prev);
+
+/* Iterate nodes */
+#define dt_for_each_node(root, node) \
+ for (node = dt_first(root); node; node = dt_next(root, node))
+
+#define dt_for_each_child(parent, node) \
+ list_for_each(&parent->children, node, list)
+
+/* Find a string in a string list */
+bool dt_prop_find_string(const struct dt_property *p, const char *s);
+
+/* Check a compatible property */
+bool dt_node_is_compatible(const struct dt_node *node, const char *compat);
+
+/* Find a node based on compatible property */
+struct dt_node *dt_find_compatible_node(struct dt_node *root,
+ struct dt_node *prev,
+ const char *compat);
+
+#define dt_for_each_compatible(root, node, compat) \
+ for (node = NULL; \
+ (node = dt_find_compatible_node(root, node, compat)) != NULL;)
+
+struct dt_node *dt_find_compatible_node_on_chip(struct dt_node *root,
+ struct dt_node *prev,
+ const char *compat,
+ uint32_t chip_id);
+
+#define dt_for_each_compatible_on_chip(root, node, compat, chip_id) \
+ for (node = NULL; \
+ (node = dt_find_compatible_node_on_chip(root, node,\
+ compat, chip_id)) != NULL;)
+
+/* Build the full path for a node. Return a new block of memory, caller
+ * shall free() it
+ */
+char *dt_get_path(const struct dt_node *node);
+
+/* Find a node by path */
+struct dt_node *dt_find_by_path(struct dt_node *root, const char *path);
+
+/* Find a node by phandle */
+struct dt_node *dt_find_by_phandle(struct dt_node *root, u32 phandle);
+
+/* Find a property by name. */
+const struct dt_property *dt_find_property(const struct dt_node *node,
+ const char *name);
+const struct dt_property *dt_require_property(const struct dt_node *node,
+ const char *name, int wanted_len);
+
+/* non-const variant */
+struct dt_property *__dt_find_property(struct dt_node *node, const char *name);
+
+/* Find a property by name, check if it's the same as val. */
+bool dt_has_node_property(const struct dt_node *node,
+ const char *name, const char *val);
+
+/* Free a node (and any children). */
+void dt_free(struct dt_node *node);
+
+/* Parse an initial fdt */
+void dt_expand(const void *fdt);
+int dt_expand_node(struct dt_node *node, const void *fdt, int fdt_node);
+
+/* Simplified accessors */
+u64 dt_prop_get_u64(const struct dt_node *node, const char *prop);
+u64 dt_prop_get_u64_def(const struct dt_node *node, const char *prop, u64 def);
+u32 dt_prop_get_u32(const struct dt_node *node, const char *prop);
+u32 dt_prop_get_u32_def(const struct dt_node *node, const char *prop, u32 def);
+const void *dt_prop_get(const struct dt_node *node, const char *prop);
+const void *dt_prop_get_def(const struct dt_node *node, const char *prop,
+ void *def);
+const void *dt_prop_get_def_size(const struct dt_node *node, const char *prop,
+ void *def, size_t *len);
+u32 dt_prop_get_cell(const struct dt_node *node, const char *prop, u32 cell);
+u32 dt_prop_get_cell_def(const struct dt_node *node, const char *prop, u32 cell, u32 def);
+
+/* Parsing helpers */
+u32 dt_n_address_cells(const struct dt_node *node);
+u32 dt_n_size_cells(const struct dt_node *node);
+u64 dt_get_number(const void *pdata, unsigned int cells);
+
+/* Find an ibm,chip-id property in this node; if not found, walk up the parent
+ * nodes. Returns -1 if no chip-id property exists. */
+u32 dt_get_chip_id(const struct dt_node *node);
+
+/* Address accessors ("reg" properties parsing). No translation,
+ * only support "simple" address forms (1 or 2 cells). Asserts
+ * if address doesn't exist
+ */
+u64 dt_get_address(const struct dt_node *node, unsigned int index,
+ u64 *out_size);
+
+/* Count "reg" property entries */
+unsigned int dt_count_addresses(const struct dt_node *node);
+
+/* Address translation
+ *
+ * WARNING: Current implementation is simplified and will not
+ * handle complex address formats with address space indicators,
+ * nor will it handle "ranges" translations yet... (XXX TODO)
+ */
+u64 dt_translate_address(const struct dt_node *node, unsigned int index,
+ u64 *out_size);
+
+#endif /* __DEVICE_H */
diff --git a/include/device_tree.h b/include/device_tree.h
new file mode 100644
index 0000000..d04f20a
--- /dev/null
+++ b/include/device_tree.h
@@ -0,0 +1,35 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DEVICE_TREE_H
+#define __DEVICE_TREE_H
+#include <stdint.h>
+
+/* Note: Device tree creation has no locks. It's assumed to be done
+ * by a single processor in a non-racy way
+ */
+void *create_dtb(const struct dt_node *root);
+
+/* Helpers to cache errors in fdt; use this instead of fdt_* */
+uint32_t dt_begin_node(const char *name); /* returns phandle */
+void dt_property_string(const char *name, const char *value);
+void dt_property_cell(const char *name, u32 cell);
+void dt_property_cells(const char *name, int count, ...);
+void dt_property(const char *name, const void *val, size_t size);
+void dt_end_node(void);
+
+
+#endif /* __DEVICE_TREE_H */
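
A usage sketch of the caching helpers above; the node name and property values are illustrative:

static void dtb_emit_example(void)
{
	dt_begin_node("example");
	dt_property_string("compatible", "vendor,example");
	dt_property_cell("#address-cells", 2);
	dt_end_node();
}
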
diff --git a/include/ec/config.h b/include/ec/config.h
new file mode 100644
index 0000000..201ccac
--- /dev/null
+++ b/include/ec/config.h
@@ -0,0 +1,82 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file config.h
+ *
+ * @brief Definitions for EC configuration values.
+ *
+ */
+
+#ifndef __EC_CONFIG_H_
+#define __EC_CONFIG_H_
+
+#include <stdint.h>
+
+#define EC_RTC_PORT_BASE (0x70) // RTC/CMOS LPC base address
+#define EC_RTC_BLOCK_SIZE (512) // Size of addressable data in RTC
+#define EC_RTC_CENTURY (1) // 1 if century format is enabled
+#if EC_RTC_CENTURY
+#define EC_RTC_BBRAM_OFFSET (0x33) // Offset of NV data (= size of calendar)
+#else
+#define EC_RTC_BBRAM_OFFSET (0x0E) // Offset of NV data (= size of calendar)
+#endif // #if EC_RTC_CENTURY
+
+#define EC_RTCDD_READ_TRIES (2) // Times to try the RTC if updating
+#define EC_RTCDD_RETRY_DELAY (300000) // Delay between RTC read retries in ns
+ // based on update time of 244 + 30.5 µs
+
+#define EC_GPIO_INDEX 0x200
+#define EC_GPIO_DATA 0x201
+#define EC_GPIO_NUM_PORTS 17
+#define EC_GPIO_PORT_SKIP 4
+
+#define EC_GPIO_DATA_OFFSET 0x0
+#define EC_GPIO_DDR_OFFSET 0x1
+#define EC_GPIO_PIN_OFFSET 0x2
+#define EC_GPIO_PUP_OFFSET 0x3
+
+typedef enum EcGpioPort {
+ EC_GPIO_PORT_A = 0,
+ EC_GPIO_PORT_B = 1,
+ EC_GPIO_PORT_C = 2,
+ EC_GPIO_PORT_D = 3,
+ EC_GPIO_PORT_E = 4,
+ EC_GPIO_PORT_F = 5,
+ EC_GPIO_PORT_G = 6,
+ EC_GPIO_PORT_H = 7,
+ // skip port I
+ EC_GPIO_PORT_J = 8,
+ EC_GPIO_PORT_K = 9,
+ EC_GPIO_PORT_L = 10,
+ EC_GPIO_PORT_M = 11,
+ EC_GPIO_PORT_N = 12,
+ // skip port O
+ EC_GPIO_PORT_P = 13,
+ EC_GPIO_PORT_Q = 14,
+ EC_GPIO_PORT_R = 15,
+ EC_GPIO_PORT_S = 16,
+} EcGpioPort;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void ec_outb(uint16_t, uint8_t);
+uint8_t ec_inb(uint16_t);
+#ifdef __cplusplus
+}
+#endif
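+
+/* Access sketch (illustrative; the banked index/data addressing is an
+ * assumption inferred from the EC_GPIO_* definitions above):
+ *
+ *   ec_outb(EC_GPIO_INDEX,
+ *           EC_GPIO_PORT_B * EC_GPIO_PORT_SKIP + EC_GPIO_DATA_OFFSET);
+ *   uint8_t data = ec_inb(EC_GPIO_DATA);
+ */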
+
+#endif // __EC_CONFIG_H_
diff --git a/include/ec/gpio.h b/include/ec/gpio.h
new file mode 100644
index 0000000..82a9343
--- /dev/null
+++ b/include/ec/gpio.h
@@ -0,0 +1,53 @@
+/* Copyright 2013-2014 Google Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file gpio.h
+ *
+ * @brief Public interface of the EC GPIO device driver
+ *
+ */
+
+#ifndef __EC_GPIO_H_
+#define __EC_GPIO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EC_GPIO_INPUT 0
+#define EC_GPIO_OUTPUT 1
+#define EC_GPIO_PULLUP_DISABLE 0
+#define EC_GPIO_PULLUP_ENABLE 1
+
+// Sets up a GPIO as output or input.
+// Returns: <0 on error
+int ec_gpio_setup(EcGpioPort port, uint8_t pin,
+ int is_output, int pullup_enable);
+
+// Reads the current value of an input GPIO.
+// Returns: GPIO value (0,1) or <0 on error.
+int ec_gpio_read(EcGpioPort port, uint8_t pin);
+
+// Sets the driving value of an output GPIO. Port should already be set
+// to output mode.
+// Returns: <0 on error
+int ec_gpio_set(EcGpioPort port, uint8_t pin, int val);
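+
+/* Usage sketch (port/pin choice is illustrative):
+ *
+ *   if (ec_gpio_setup(EC_GPIO_PORT_A, 3, EC_GPIO_OUTPUT,
+ *                     EC_GPIO_PULLUP_DISABLE) >= 0)
+ *           ec_gpio_set(EC_GPIO_PORT_A, 3, 1);
+ */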
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __EC_GPIO_H_
diff --git a/include/elf.h b/include/elf.h
new file mode 100644
index 0000000..0a52f3e
--- /dev/null
+++ b/include/elf.h
@@ -0,0 +1,135 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ELF_H
+#define __ELF_H
+
+#include <stdint.h>
+
+/* Generic ELF header */
+struct elf_hdr {
+ uint32_t ei_ident;
+#define ELF_IDENT 0x7F454C46
+ uint8_t ei_class;
+#define ELF_CLASS_32 1
+#define ELF_CLASS_64 2
+ uint8_t ei_data;
+#define ELF_DATA_LSB 1
+#define ELF_DATA_MSB 2
+ uint8_t ei_version;
+ uint8_t ei_pad[9];
+ uint16_t e_type;
+ uint16_t e_machine;
+#define ELF_MACH_PPC32 0x14
+#define ELF_MACH_PPC64 0x15
+ uint32_t e_version;
+};
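+
+/* Identification sketch (illustrative): check the generic header
+ * before trusting the 64-bit layout below.
+ *
+ *   const struct elf_hdr *eh = image;
+ *   if (eh->ei_ident == ELF_IDENT && eh->ei_class == ELF_CLASS_64 &&
+ *       eh->e_machine == ELF_MACH_PPC64)
+ *           ... safe to cast image to struct elf64_hdr ...
+ */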
+
+/* 64-bit ELF header */
+struct elf64_hdr {
+ uint32_t ei_ident;
+ uint8_t ei_class;
+ uint8_t ei_data;
+ uint8_t ei_version;
+ uint8_t ei_pad[9];
+ uint16_t e_type;
+ uint16_t e_machine;
+ uint32_t e_version;
+ uint64_t e_entry;
+ uint64_t e_phoff;
+ uint64_t e_shoff;
+ uint32_t e_flags;
+ uint16_t e_ehsize;
+ uint16_t e_phentsize;
+ uint16_t e_phnum;
+ uint16_t e_shentsize;
+ uint16_t e_shnum;
+ uint16_t e_shstrndx;
+};
+
+/* 64-bit ELF program header */
+struct elf64_phdr {
+ uint32_t p_type;
+#define ELF_PTYPE_LOAD 1
+ uint32_t p_flags;
+#define ELF_PFLAGS_R 0x4
+#define ELF_PFLAGS_W 0x2
+#define ELF_PFLAGS_X 0x1
+ uint64_t p_offset;
+ uint64_t p_vaddr;
+ uint64_t p_paddr;
+ uint64_t p_filesz;
+ uint64_t p_memsz;
+ uint64_t p_align;
+};
+
+/* Some relocation related stuff used in relocate.c */
+struct elf64_dyn {
+ int64_t d_tag;
+#define DT_NULL 0
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_RELACOUNT 0x6ffffff9
+ uint64_t d_val;
+};
+
+struct elf64_rela {
+ uint64_t r_offset;
+ uint64_t r_info;
+#define ELF64_R_TYPE(info) ((info) & 0xffffffffu)
+ int64_t r_addend;
+};
+
+/* relocs we support */
+#define R_PPC64_RELATIVE 22
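+
+/* Application sketch (illustrative; "bias" stands for the load offset
+ * computed by relocate.c-style code):
+ *
+ *   if (ELF64_R_TYPE(r->r_info) == R_PPC64_RELATIVE)
+ *           *(uint64_t *)(bias + r->r_offset) = bias + r->r_addend;
+ */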
+
+/* 32-bit ELF header */
+struct elf32_hdr {
+ uint32_t ei_ident;
+ uint8_t ei_class;
+ uint8_t ei_data;
+ uint8_t ei_version;
+ uint8_t ei_pad[9];
+ uint16_t e_type;
+ uint16_t e_machine;
+ uint32_t e_version;
+ uint32_t e_entry;
+ uint32_t e_phoff;
+ uint32_t e_shoff;
+ uint32_t e_flags;
+ uint16_t e_ehsize;
+ uint16_t e_phentsize;
+ uint16_t e_phnum;
+ uint16_t e_shentsize;
+ uint16_t e_shnum;
+ uint16_t e_shstrndx;
+};
+
+/* 32-bit ELF program header*/
+struct elf32_phdr {
+ uint32_t p_type;
+ uint32_t p_offset;
+ uint32_t p_vaddr;
+ uint32_t p_paddr;
+ uint32_t p_filesz;
+ uint32_t p_memsz;
+ uint32_t p_flags;
+ uint32_t p_align;
+};
+
+
+#endif /* __ELF_H */
diff --git a/include/fsi-master.h b/include/fsi-master.h
new file mode 100644
index 0000000..e727214
--- /dev/null
+++ b/include/fsi-master.h
@@ -0,0 +1,36 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FSI_MASTER_H
+#define __FSI_MASTER_H
+
+/*
+ * Definition of the MFSI masters
+ */
+#define MFSI_cMFSI0 0
+#define MFSI_cMFSI1 1
+#define MFSI_hMFSI0 2
+
+extern int64_t mfsi_read(uint32_t chip, uint32_t mfsi, uint32_t port,
+ uint32_t fsi_addr, uint32_t *data);
+
+extern int64_t mfsi_write(uint32_t chip, uint32_t mfsi, uint32_t port,
+ uint32_t fsi_addr, uint32_t data);
+
+extern void mfsi_init(void);
+
+#endif /* __FSI_MASTER_H */
+
diff --git a/include/fsp-elog.h b/include/fsp-elog.h
new file mode 100644
index 0000000..34913c5
--- /dev/null
+++ b/include/fsp-elog.h
@@ -0,0 +1,325 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <opal.h>
+#ifndef __ELOG_H
+#define __ELOG_H
+
+#define ELOG_TYPE_PEL 0
+#define MAX_RETRIES 3
+
+/* Component IDs */
+/* In the PEL error log format, the Creator ID is the hypervisor,
+ * but we can have various component IDs to distinguish which
+ * component in the hypervisor is reporting the error.
+ * This is 2 bytes long:
+ * the first byte corresponds to the Component ID,
+ * the second byte is reserved for the Reason code.
+ * The Component ID is mapped to a readable 4-character ASCII
+ * name in the FSP and displayed.
+ */
+/* SAPPHIRE components */
+#define OPAL_CODEUPDATE 0x1000
+#define OPAL_CONSOLE 0x2000
+#define OPAL_CEC 0x3000
+#define OPAL_CHIP 0x4000
+#define OPAL_ELOG 0x5000
+#define OPAL_NVRAM 0x6000
+#define OPAL_RTC 0x7000
+#define OPAL_SURVEILLANCE 0x8000
+#define OPAL_SYSPARAM 0x9000
+#define OPAL_LPC 0xa000
+#define OPAL_UART 0xb000
+#define OPAL_OCC 0xc000
+#define OPAL_OP_PANEL 0xd000
+#define OPAL_PHB3 0xe000
+#define OPAL_PSI 0xf000
+#define OPAL_VPD 0x1000
+#define OPAL_XSCOM 0x1100
+#define OPAL_PCI 0x1200
+#define OPAL_MISC 0x1300
+#define OPAL_ATTN 0x1400
+#define OPAL_MEM_ERR 0x1500
+#define OPAL_CENTAUR 0x1600
+#define OPAL_MFSI 0x1700
+#define OPAL_DUMP 0x1800
+#define OPAL_LED 0x1900
+#define OPAL_SENSOR 0x2000
+#define OPAL_SLW 0x2100
+#define OPAL_FSP 0x2200
+
+enum opal_reasoncode {
+/* code update */
+ OPAL_RC_CU_FLASH = OPAL_CODEUPDATE | 0x10,
+ OPAL_RC_CU_INIT = OPAL_CODEUPDATE | 0x11,
+ OPAL_RC_CU_SG_LIST = OPAL_CODEUPDATE | 0x12,
+ OPAL_RC_CU_COMMIT = OPAL_CODEUPDATE | 0x13,
+ OPAL_RC_CU_MSG = OPAL_CODEUPDATE | 0x14,
+ OPAL_RC_CU_NOTIFY = OPAL_CODEUPDATE | 0x15,
+ OPAL_RC_CU_MARKER_LID = OPAL_CODEUPDATE | 0x16,
+/* NVRAM */
+ OPAL_RC_NVRAM_INIT = OPAL_NVRAM | 0x10,
+ OPAL_RC_NVRAM_OPEN = OPAL_NVRAM | 0x11,
+ OPAL_RC_NVRAM_SIZE = OPAL_NVRAM | 0x12,
+ OPAL_RC_NVRAM_WRITE = OPAL_NVRAM | 0x13,
+ OPAL_RC_NVRAM_READ = OPAL_NVRAM | 0x14,
+/* CENTAUR */
+ OPAL_RC_CENTAUR_INIT = OPAL_CENTAUR | 0x10,
+ OPAL_RC_CENTAUR_RW_ERR = OPAL_CENTAUR | 0x11,
+/* MFSI */
+ OPAL_RC_MFSI_RW_ERR = OPAL_MFSI | 0x10,
+/* UART */
+ OPAL_RC_UART_INIT = OPAL_UART | 0x10,
+/* OCC */
+ OPAL_RC_OCC_RESET = OPAL_OCC | 0x10,
+ OPAL_RC_OCC_LOAD = OPAL_OCC | 0x11,
+ OPAL_RC_OCC_PSTATE_INIT = OPAL_OCC | 0x12,
+/* RTC */
+ OPAL_RC_RTC_READ = OPAL_RTC | 0x10,
+ OPAL_RC_RTC_TOD = OPAL_RTC | 0x11,
+/* SURVEILLANCE */
+ OPAL_RC_SURVE_INIT = OPAL_SURVEILLANCE | 0x10,
+ OPAL_RC_SURVE_STATUS = OPAL_SURVEILLANCE | 0x11,
+/* SYSPARAM */
+ OPAL_RC_SYSPARM_INIT = OPAL_SYSPARAM | 0x10,
+ OPAL_RC_SYSPARM_MSG = OPAL_SYSPARAM | 0x11,
+/* LPC */
+ OPAL_RC_LPC_READ = OPAL_LPC | 0x10,
+ OPAL_RC_LPC_WRITE = OPAL_LPC | 0x11,
+/* OP_PANEL */
+ OPAL_RC_PANEL_WRITE = OPAL_OP_PANEL | 0x10,
+/* PSI */
+ OPAL_RC_PSI_INIT = OPAL_PSI | 0x10,
+ OPAL_RC_PSI_IRQ_RESET = OPAL_PSI | 0x11,
+/* XSCOM */
+ OPAL_RC_XSCOM_RW = OPAL_XSCOM | 0x10,
+ OPAL_RC_XSCOM_INDIRECT_RW = OPAL_XSCOM | 0x11,
+ OPAL_RC_XSCOM_RESET = OPAL_XSCOM | 0x12,
+/* PCI */
+ OPAL_RC_PCI_INIT_SLOT = OPAL_PCI | 0x10,
+ OPAL_RC_PCI_ADD_SLOT = OPAL_PCI | 0x11,
+ OPAL_RC_PCI_SCAN = OPAL_PCI | 0x12,
+ OPAL_RC_PCI_RESET_PHB = OPAL_PCI | 0x10,
+/* ATTN */
+ OPAL_RC_ATTN = OPAL_ATTN | 0x10,
+/* MEM_ERR */
+ OPAL_RC_MEM_ERR_RES = OPAL_MEM_ERR | 0x10,
+ OPAL_RC_MEM_ERR_DEALLOC = OPAL_MEM_ERR | 0x11,
+/* DUMP */
+ OPAL_RC_DUMP_INIT = OPAL_DUMP | 0x10,
+ OPAL_RC_DUMP_LIST = OPAL_DUMP | 0x11,
+ OPAL_RC_DUMP_ACK = OPAL_DUMP | 0x12,
+ OPAL_RC_DUMP_MDST_INIT = OPAL_DUMP | 0x13,
+ OPAL_RC_DUMP_MDST_UPDATE = OPAL_DUMP | 0x14,
+/* LED */
+ OPAL_RC_LED_SPCN = OPAL_LED | 0x10,
+ OPAL_RC_LED_BUFF = OPAL_LED | 0x11,
+ OPAL_RC_LED_LC = OPAL_LED | 0x12,
+ OPAL_RC_LED_STATE = OPAL_LED | 0x13,
+ OPAL_RC_LED_SUPPORT = OPAL_LED | 0x14,
+/* SENSOR */
+ OPAL_RC_SENSOR_INIT = OPAL_SENSOR | 0x10,
+ OPAL_RC_SENSOR_READ = OPAL_SENSOR | 0x11,
+ OPAL_RC_SENSOR_ASYNC_COMPLETE
+ = OPAL_SENSOR | 0x12,
+/* SLW */
+ OPAL_RC_SLW_INIT = OPAL_SLW | 0x10,
+ OPAL_RC_SLW_SET = OPAL_SLW | 0x11,
+ OPAL_RC_SLW_GET = OPAL_SLW | 0x12,
+ OPAL_RC_SLW_REG = OPAL_SLW | 0x13,
+/* FSP */
+ OPAL_RC_FSP_POLL_TIMEOUT
+ = OPAL_FSP | 0x10,
+};
+
+/* Data Structures for PEL data. */
+
+#define PRIVATE_HEADER_SECTION_SIZE 48
+#define USER_HEADER_SECTION_SIZE 24
+#define SRC_SECTION_SIZE 80
+#define SRC_SUBSECTION_SIZE 4
+#define SRC_LENGTH 72
+#define OPAL_MAX_SRC_BYTES 32
+#define EXTENDED_HEADER_SECTION_SIZE 76
+#define MTMS_SECTION_SIZE 28
+#define IO_EVENT_SECTION_SIZE 16
+
+#define OPAL_ELOG_VERSION 1
+#define OPAL_ELOG_SST 0
+#define OPAL_SRC_MAX_WORD_COUNT 8
+
+#define OPAL_SRC_FORMAT 0x80
+#define OPAL_FAILING_SUBSYSTEM 0x82
+
+#define OPAL_SYS_MODEL_LEN 8
+#define OPAL_SYS_SERIAL_LEN 12
+#define OPAL_VER_LEN 16
+#define OPAL_SYMPID_LEN 80
+#define OPAL_RC_NONE 0
+
+#define OPAL_IO_MAX_RPC_DATA 216
+#define OPAL_SRC_SEC_VER 0x02
+#define OPAL_EXT_HRD_VER 0x01
+
+/* Error log reporting action */
+#define ERRL_ACTION_REPORT 0x2000
+#define ERRL_ACTION_NONE 0x0000
+
+enum elogSectionId {
+ ELOG_SID_PRIVATE_HEADER = 0x5048, /* PH */
+ ELOG_SID_USER_HEADER = 0x5548, /* UH */
+ ELOG_SID_EXTENDED_HEADER = 0x4548, /* EH */
+ ELOG_SID_PRIMARY_SRC = 0x5053, /* PS */
+ ELOG_SID_MACHINE_TYPE = 0x4D54, /* MT */
+ ELOG_SID_SECONDARY_SRC = 0x5353, /* SS */
+ ELOG_SID_CALL_HOME = 0x4348, /* CH */
+ ELOG_SID_DUMP_LOCATOR = 0x4448, /* DH */
+ ELOG_SID_SOFTWARE_ERROR = 0x5357, /* SW */
+ ELOG_SID_PARTITION = 0x4C50, /* LP */
+ ELOG_SID_LOGICAL_RESOURCE = 0x4C52, /* LR */
+ ELOG_SID_HMC_ID = 0x484D, /* HM */
+ ELOG_SID_EPOW = 0x4550, /* EP */
+ ELOG_SID_IO_EVENT = 0x4945, /* IE */
+ ELOG_SID_MFG_INFORMATION = 0x4D49, /* MI */
+ ELOG_SID_USER_DEFINED = 0x5544 /* UD */
+};
+
+struct opal_v6_header {
+ enum elogSectionId id:16; /* section id */
+ uint16_t length; /* section length */
+ uint8_t version; /* section version */
+ uint8_t subtype; /* section sub-type id */
+ uint16_t component_id; /* component id of section creator */
+};
+
+/* opal_srctype */
+#define OPAL_SRC_TYPE_ERROR 0xBB
+
+#define OPAL_CID_SAPPHIRE 'K' /* creator ID for sapphire log */
+#define OPAL_CID_POWERNV 'P' /* creator ID for powernv log */
+
+/* Origin of error, elog_origin */
+#define ORG_SAPPHIRE 1
+#define ORG_POWERNV 2
+
+/* opal private header section */
+struct opal_private_header_section {
+
+ struct opal_v6_header v6header;
+ uint32_t create_date;
+ uint32_t create_time;
+ uint32_t commit_date;
+ uint32_t commit_time;
+
+ uint32_t creator_id:8; /* subsystem component id */
+ uint32_t reserved_0:16;
+ uint32_t section_count:8; /* number of sections in log */
+ uint32_t reserved_1;
+ uint32_t creator_subid_hi;
+ uint32_t creator_subid_lo;
+ uint32_t plid; /* platform log id */
+ uint32_t log_entry_id; /* Unique log entry id */
+};
+
+/* opal user header section */
+struct opal_user_header_section {
+
+ struct opal_v6_header v6header;
+
+ uint8_t subsystem_id; /* subsystem id */
+ uint8_t event_scope;
+ uint8_t event_severity;
+ uint8_t event_type; /* error/event severity */
+
+ uint32_t reserved_0;
+ uint16_t reserved_1;
+ uint16_t action_flags; /* error action code */
+ uint32_t reserved_2;
+};
+
+struct opal_src_section {
+ struct opal_v6_header v6header;
+ uint8_t version;
+ uint8_t flags;
+ uint8_t reserved_0;
+ uint8_t wordcount;
+ uint16_t reserved_1;
+ uint16_t srclength;
+ uint32_t hexwords[OPAL_SRC_MAX_WORD_COUNT];
+ char srcstring[OPAL_MAX_SRC_BYTES];
+};
+
+struct opal_extended_header_section {
+ struct opal_v6_header v6header;
+ char model[OPAL_SYS_MODEL_LEN];
+ char serial_no[OPAL_SYS_SERIAL_LEN];
+ char opal_release_version[OPAL_VER_LEN];
+ char opal_subsys_version[OPAL_VER_LEN];
+ uint16_t reserved_0;
+ uint32_t extended_header_date;
+ uint32_t extended_header_time;
+ uint16_t reserved_1;
+ uint8_t reserved_2;
+ uint8_t opal_symid_len;
+ char opalsymid[OPAL_SYMPID_LEN];
+};
+
+/* opal MTMS section */
+struct opal_mtms_section {
+ struct opal_v6_header v6header;
+ char model[OPAL_SYS_MODEL_LEN];
+ char serial_no[OPAL_SYS_SERIAL_LEN];
+};
+
+/* User defined section */
+struct opal_user_section {
+ struct opal_v6_header v6header;
+ char dump[1];
+};
+
+struct opal_err_info {
+ uint32_t reason_code;
+ uint8_t err_type;
+ uint16_t cmp_id;
+ uint8_t subsystem;
+ uint8_t sev;
+ uint8_t event_subtype;
+ void (*call_out)(struct opal_errorlog *buf, void *data, uint16_t size);
+};
+
+#define DEFINE_LOG_ENTRY(reason, type, id, subsys, \
+severity, subtype, callout_func) struct opal_err_info err_##reason = \
+{ .reason_code = reason, .err_type = type, .cmp_id = id, \
+.subsystem = subsys, .sev = severity, .event_subtype = subtype, \
+.call_out = callout_func }
+
+#define e_info(reason_code) err_##reason_code
+
+struct opal_errorlog *opal_elog_create(struct opal_err_info *e_info);
+
+int opal_elog_update_user_dump(struct opal_errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size);
+
+int elog_fsp_commit(struct opal_errorlog *buf);
+
+/* This is wrapper around the error log function, which creates
+ * and commits the error to FSP.
+ * Used for simple error logging
+ */
+void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
+ const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
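+
+/* Usage sketch (the zero type/subsystem/severity/subtype values are
+ * placeholders; real values come from opal.h):
+ *
+ *   DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_READ, 0, OPAL_NVRAM, 0, 0, 0, NULL);
+ *
+ *   log_simple_error(&e_info(OPAL_RC_NVRAM_READ), "NVRAM: read failed\n");
+ */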
+
+#endif /* __ELOG_H */
diff --git a/include/fsp-leds.h b/include/fsp-leds.h
new file mode 100644
index 0000000..66cbe8a
--- /dev/null
+++ b/include/fsp-leds.h
@@ -0,0 +1,135 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ */
+
+
+/*
+ * SPCN based LED location code and other information
+ */
+
+#ifndef __FSP_LEDS_H
+#define __FSP_LEDS_H
+
+/* Supported FSP response codes */
+#define FSP_IND_NOT_IMPLMNTD 0x00 /* Indicator not implemented */
+#define FSP_IND_IMPLMNTD 0x04 /* Indicator implemented */
+#define FSP_IND_IMPL_UNKNOWN 0x08 /* Implementation unknown */
+#define FSP_IND_INACTIVE 0x00 /* Indicator not active */
+#define FSP_IND_IDENTIFY_ACTV 0x01 /* Identify state active */
+#define FSP_IND_FAULT_ACTV 0x02 /* Fault state active */
+#define FSP_IND_STATE_UNKNOWN 0xff /* Indicator state unknown */
+#define FSP_RSRC_NOT_PRESENT 0x00 /* Resource not present */
+#define FSP_RSRC_PRESENT 0x40 /* Resource present */
+#define FSP_RSRC_PRSNC_UNKNOWN 0x80 /* Resource presence unknown */
+
+/* LED exclusive bits */
+#define FSP_LED_EXCL_FAULT (1UL << 0)
+#define FSP_LED_EXCL_IDENTIFY (1UL << 1)
+
+/* SPCN set LED */
+struct spcn_led_data {
+ u8 lc_len;
+ u16 state;
+ char lc_code[LOC_CODE_SIZE];
+};
+
+/* LED data */
+struct fsp_led_data {
+ u16 rid; /* Resource ID */
+ u8 lc_len; /* Location code len */
+ char loc_code[LOC_CODE_SIZE];
+ u16 parms; /* Parameters */
+ u16 status; /* Status */
+ u16 ckpt_status; /* Checkpointed status */
+ u16 excl_bit; /* Exclusive LED bit */
+ struct list_node link;
+};
+
+/* FSP location code request */
+struct fsp_loc_code_req {
+ u16 len;
+ u16 req_type;
+ u8 raw_len;
+ u8 lc_sz;
+ char loc_code[LOC_CODE_SIZE];
+};
+
+/* FSP location code data */
+struct fsp_loc_code_data {
+ u16 size;
+ u32 ccin;
+ u8 status;
+ u8 ind_state;
+ u8 raw_len;
+ u8 fld_sz;
+
+ /* The size below must include the padding to
+ * make the whole structure aligned to a
+ * multiple of 4 bytes
+ */
+ char loc_code[LOC_CODE_SIZE + 2]; /* 82 */
+
+ /* We need to pack the structure otherwise the
+ * compiler adds additional alignment to make
+ * it 8 bytes aligned
+ */
+} __packed;
+
+/* Get indicator state request */
+struct fsp_get_ind_state_req {
+ u16 size;
+ u8 lc_len;
+ u8 fld_sz;
+ char loc_code[LOC_CODE_SIZE];
+};
+
+/* Set indicator state request */
+struct fsp_set_ind_state_req {
+ u16 size;
+ u16 req_type;
+ u8 reserved[3];
+ u8 ind_state;
+ u8 lc_len;
+ u8 fld_sz;
+ char loc_code[LOC_CODE_SIZE];
+};
+
+/* LED commands and state */
+#define LED_COMMAND_FAULT 1
+#define LED_COMMAND_IDENTIFY 0
+#define LED_STATE_ON 1
+#define LED_STATE_OFF 0
+
+/* FSP get loc-code list command request type */
+#define GET_LC_CMPLT_SYS 0x8000
+#define GET_LC_ENCLOSURES 0x4000
+#define GET_LC_ENCL_DESCENDANTS 0x2000
+#define GET_LC_SINGLE_LOC_CODE 0x0100
+
+/* FSP set indicator command request type */
+#define SET_IND_ENCLOSURE 0x4000
+#define SET_IND_SINGLE_LOC_CODE 0x0001
+
+/* Response buffer */
+#define OUTBUF_HEADER_SIZE 8
+
+/* LED miscellaneous */
+#define LOC_CODE_LEN 1
+#define LED_CONTROL_LEN 2
+#define FSP_LC_STRUCT_FIXED_SZ 0x0a
+
+#endif
diff --git a/include/fsp-mdst-table.h b/include/fsp-mdst-table.h
new file mode 100644
index 0000000..ae2ef12
--- /dev/null
+++ b/include/fsp-mdst-table.h
@@ -0,0 +1,37 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __FSPMDST_H
+#define __FSPMDST_H
+
+/* Dump section type */
+#define DUMP_SECTION_CONSOLE 0x01
+#define DUMP_SECTION_HBRT_LOG 0x02
+
+/*
+ * Sapphire Memory Dump Source Table
+ *
+ * The format of this table is the same as the Memory Dump Source
+ * Table (MDST) defined in the HDAT spec.
+ */
+struct dump_mdst_table {
+ uint64_t addr;
+ uint32_t type; /* DUMP_SECTION_* */
+ uint32_t size;
+};
+
+#endif /* __FSPMDST_H */
diff --git a/include/fsp-sysparam.h b/include/fsp-sysparam.h
new file mode 100644
index 0000000..1a7a472
--- /dev/null
+++ b/include/fsp-sysparam.h
@@ -0,0 +1,57 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __FSP_SYSPARAM_H
+#define __FSP_SYSPARAM_H
+
+/* System parameter numbers used in the protocol
+ *
+ * These are the only ones we care about right now.
+ */
+#define SYS_PARAM_SURV 0xf0000001
+#define SYS_PARAM_HMC_MANAGED 0xf0000003
+#define SYS_PARAM_FLASH_POLICY 0xf0000012
+#define SYS_PARAM_NEED_HMC 0xf0000016
+#define SYS_PARAM_FW_LICENSE 0xf000001d
+#define SYS_PARAM_WWPN 0xf0000023
+#define SYS_PARAM_DEF_BOOT_DEV 0xf0000024
+#define SYS_PARAM_NEXT_BOOT_DEV 0xf0000025
+
+
+
+/* Completion for a sysparam call. err_len is either a negative error
+ * code or the positive length of the returned data
+ */
+typedef void (*sysparam_compl_t)(uint32_t param_id, int err_len, void *data);
+
+
+/* Send a sysparam query request. Operation can be synchronous or
+ * asynchronous:
+ *
+ * - synchronous (async_complete is NULL), the result code is either
+ * a negative error code or a positive returned length.
+ *
+ * - asynchronous (async_complete non-NULL). The result code is 0 for
+ *   a successfully queued request, or an error code for an immediate
+ *   failure. A successfully queued request will complete via the
+ *   completion callback defined above.
+ */
+int fsp_get_sys_param(uint32_t param_id, void *buffer, uint32_t length,
+ sysparam_compl_t async_complete, void *comp_data);
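+
+/* Usage sketch (synchronous form; parameter choice is illustrative):
+ *
+ *   uint8_t surv;
+ *   int rc = fsp_get_sys_param(SYS_PARAM_SURV, &surv, sizeof(surv),
+ *                              NULL, NULL);
+ *
+ * rc is then a negative error code or the returned length.
+ */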
+
+
+void fsp_sysparam_init(void);
+
+#endif /* __FSP_SYSPARAM_H */
diff --git a/include/fsp.h b/include/fsp.h
new file mode 100644
index 0000000..b6f6f0e
--- /dev/null
+++ b/include/fsp.h
@@ -0,0 +1,755 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * IBM System P FSP (Flexible Service Processor)
+ */
+#ifndef __FSP_H
+#define __FSP_H
+
+#include <skiboot.h>
+#include <psi.h>
+
+/* Current max number of FSPs
+ * one primary and one secondary is all we support
+ */
+#define FSP_MAX 2
+
+/* Command protocol.
+ *
+ * Commands have a byte class and a byte subcommand. With the exception
+ * of some HMC related commands (class 0xe0) which we don't support,
+ * only one outstanding command is allowed for a given class.
+ *
+ * Note: 0xCE and 0xCF fall into the same class, ie, only one of them can
+ * be outstanding.
+ *
+ * A command is outstanding until it has been acknowledged. This doesn't
+ * imply a response, the response can come later.
+ */
+
+/* Protocol status error codes used by the protocol */
+#define FSP_STATUS_SUCCESS 0x00 /* Command successful */
+#define FSP_STATUS_MORE_DATA 0x02 /* Success, EOF not reached */
+#define FSP_STATUS_DATA_INLINE 0x11 /* Data inline in mbox */
+#define FSP_STATUS_INVALID_SUBCMD 0x20
+#define FSP_STATUS_INVALID_MOD 0x21
+#define FSP_STATUS_INVALID_DATA 0x22
+#define FSP_STATUS_INVALID_DPOSTATE 0x23
+#define FSP_STATUS_DMA_ERROR 0x24
+#define FSP_STATUS_INVALID_CMD 0x2c
+#define FSP_STATUS_SEQ_ERROR 0x2d
+#define FSP_STATUS_BAD_STATE 0x2e
+#define FSP_STATUS_NOT_SUPPORTED 0x2f
+#define FSP_STATUS_FILE_TOO_LARGE 0x43
+#define FSP_STATUS_FLASH_INPROGRESS 0x61
+#define FSP_STATUS_FLASH_NOPROGRESS 0x62
+#define FSP_STATUS_FLASH_INVALID_SIDE 0x63
+#define FSP_STATUS_GENERIC_ERROR 0xfe
+#define FSP_STATUS_EOF_ERROR 0x02
+#define FSP_STATUS_BUSY 0x3e
+#define FSP_STATUS_FLASH_BUSY 0x3f
+#define FSP_STATUS_INVALID_SUBID 0x41
+#define FSP_STATUS_LENGTH_ERROR 0x42
+#define FSP_STAUS_INVALID_HMC_ID 0x51
+#define FSP_STATUS_SPCN_ERROR 0xA8 /* SPCN error */
+#define FSP_STATUS_INVALID_LC 0xC0 /* Invalid location code */
+
+/*
+ * FSP registers
+ *
+ * All of the register definitions below come from the FSP0 "Black
+ * Widow" spec. They are the same for FSP1, except that they are
+ * presented big-endian vs little-endian for FSP0 -- which used PCI.
+ * All regs are 4 bytes wide, and we read the larger data areas in
+ * 4-byte granularity as well.
+ *
+ * There are actually two defined sets of MBX registers. MBX2 can't
+ * generate interrupts to the host, and only MBX1 is currently used
+ * by firmware running on the FSP, so we're mostly ignoring MBX2.
+ */
+
+/* Device Reset Control Register */
+#define FSP_DRCR_REG 0x00
+#define FSP_DRCR_CLR_REG 0x04
+
+/* Bit masks for DRCR */
+#define FSP_DRCR_CMD_VALID PPC_BIT32(16)
+#define FSP_DRCR_TERMINATE PPC_BIT32(17)
+#define FSP_DRCR_PREP_FOR_RESET PPC_BIT32(23)
+#define FSP_DRCR_CLEAR_DISR PPC_BIT32(30)
+
+/* DRCR commands need the CMD_VALID bit set */
+#define FSP_PREP_FOR_RESET_CMD (FSP_DRCR_CMD_VALID | \
+ FSP_DRCR_PREP_FOR_RESET)
+#define FSP_DRCR_ACK_MASK (0xff << 8)
+
+/* Device Immediate Status Register */
+#define FSP_DISR_REG 0x08
+#define FSP_DISR_CLR_REG 0x0C
+
+/* Bit masks for DISR */
+#define FSP_DISR_FSP_RR_COMPLETE PPC_BIT32(22)
+#define FSP_DISR_RUNTIME_STATE_SYNCD PPC_BIT32(24)
+#define FSP_DISR_DBG_IN_PROGRESS PPC_BIT32(25)
+#define FSP_DISR_FSP_IN_RR PPC_BIT32(26)
+#define FSP_DISR_FSP_REBOOT_IN_PROGRESS PPC_BIT32(27)
+#define FSP_DISR_CRIT_OP_IN_PROGRESS PPC_BIT32(28)
+#define FSP_DISR_STATUS_ACK_RXD PPC_BIT32(31)
+
+/* The host version of the control register shares bits with the FSP's
+ * control reg. Those bits are defined such that one side can set
+ * a bit and the other side can clear it
+ */
+#define FSP_MBX1_HCTL_REG 0x080 /* AKA DSCR1 */
+#define FSP_MBX1_FCTL_REG 0x090
+#define FSP_MBX2_HCTL_REG 0x0a0 /* AKA DSCR2 */
+#define FSP_MBX2_FCTL_REG 0x0b0
+
+/* Bits in the control reg */
+#define FSP_MBX_CTL_PTS (1 << 31)
+#define FSP_MBX_CTL_ABORT (1 << 30)
+#define FSP_MBX_CTL_SPPEND (1 << 29)
+#define FSP_MBX_CTL_HPEND (1 << 28)
+#define FSP_MBX_CTL_XDN (1 << 26)
+#define FSP_MBX_CTL_XUP (1 << 25)
+#define FSP_MBX_CTL_HCHOST_MASK (0xf << 20)
+#define FSP_MBX_CTL_HCHOST_SHIFT 20
+#define FSP_MBX_CTL_DCHOST_MASK (0xff << 12)
+#define FSP_MBX_CTL_DCHOST_SHIFT 12
+#define FSP_MBX_CTL_HCSP_MASK (0xf << 8)
+#define FSP_MBX_CTL_HCSP_SHIFT 8
+#define FSP_MBX_CTL_DCSP_MASK (0xff)
+#define FSP_MBX_CTL_DCSP_SHIFT 0
+
+/* Three header registers owned by the host */
+#define FSP_MBX1_HHDR0_REG 0x84
+#define FSP_MBX1_HHDR1_REG 0x88
+#define FSP_MBX1_HHDR2_REG 0x8C
+#define FSP_MBX2_HHDR0_REG 0xa4
+#define FSP_MBX2_HHDR1_REG 0xa8
+#define FSP_MBX2_HHDR2_REG 0xaC
+
+/* SP Doorbell Error Status register */
+#define FSP_SDES_REG 0xc0
+
+/* Host Doorbell Error Status register */
+#define FSP_HDES_REG 0xc4
+
+/* Bit definitions for both SDES and HDES
+ *
+ * Notes:
+ *
+ * - CLR: is written to clear the status and always reads
+ * as 0. It can be used to detect an error state (a HB
+ * freeze will return all 1's)
+ * - ILLEGAL: illegal operation such as host trying to write
+ * to an FSP only register etc...
+ * - WFULL: set if host tried to write to the SP doorbell while
+ * the pending bit is still set
+ * - REMPTY: tried to read while host pending bit not set
+ * - PAR: SP RAM parity error
+ */
+#define FSP_DBERRSTAT_ILLEGAL1 (1 << 27)
+#define FSP_DBERRSTAT_WFULL1 (1 << 26)
+#define FSP_DBERRSTAT_REMPTY1 (1 << 25)
+#define FSP_DBERRSTAT_PAR1 (1 << 24)
+#define FSP_DBERRSTAT_CLR1 (1 << 16)
+#define FSP_DBERRSTAT_ILLEGAL2 (1 << 11)
+#define FSP_DBERRSTAT_WFULL2 (1 << 10)
+#define FSP_DBERRSTAT_REMPTY2 (1 << 9)
+#define FSP_DBERRSTAT_PAR2 (1 << 8)
+#define FSP_DBERRSTAT_CLR2 (1 << 0)
+
+/* Host Doorbell Interrupt Register and mask
+ *
+ * Note that while HDIR has bits for MBX2, only
+ * MBX1 can actually generate interrupts. Thus only the
+ * MBX1 bits are implemented in the mask register.
+ */
+#define FSP_HDIR_REG 0xc8
+#define FSP_HDIM_SET_REG 0xcc
+#define FSP_HDIM_CLR_REG 0xd0
+#define FSP_DBIRQ_ERROR2 (1 << 10)
+#define FSP_DBIRQ_XUP2 (1 << 9)
+#define FSP_DBIRQ_HPEND2 (1 << 8)
+#define FSP_DBIRQ_ERROR1 (1 << 2)
+#define FSP_DBIRQ_XUP1 (1 << 1)
+#define FSP_DBIRQ_HPEND1 (1 << 0)
+#define FSP_DBIRQ_MBOX1 (FSP_DBIRQ_ERROR1 | FSP_DBIRQ_XUP1 | \
+ FSP_DBIRQ_HPEND1)
+#define FSP_DBIRQ_MBOX2 (FSP_DBIRQ_ERROR2 | FSP_DBIRQ_XUP2 | \
+ FSP_DBIRQ_HPEND2)
+#define FSP_DBIRQ_ALL (FSP_DBIRQ_MBOX1 | FSP_DBIRQ_MBOX2)
+
+/* Doorbell Interrupt Register (FSP internal interrupt latch),
+ * read-only on the host side
+ */
+#define FSP_PDIR_REG 0xd4
+/* And associated mask */
+#define FSP_PDIM_SET_REG 0xd8
+#define FSP_PDIM_CLR_REG 0xdc
+
+/* Bits for the above */
+#define FSP_PDIRQ_ABORT2 (1 << 7)
+#define FSP_PDIRQ_ABORT1 (1 << 6)
+#define FSP_PDIRQ_ERROR2 (1 << 5)
+#define FSP_PDIRQ_ERROR1 (1 << 4)
+#define FSP_PDIRQ_XDN2 (1 << 3)
+#define FSP_PDIRQ_XDN1 (1 << 2)
+#define FSP_PDIRQ_SPPEND2 (1 << 1)
+#define FSP_PDIRQ_SPPEND1 (1 << 0)
+
+/* FSP owned headers */
+#define FSP_MBX1_FHDR0_REG 0x094
+#define FSP_MBX1_FHDR1_REG 0x098
+#define FSP_MBX1_FHDR2_REG 0x09C
+#define FSP_MBX2_FHDR0_REG 0x0b4
+#define FSP_MBX2_FHDR1_REG 0x0b8
+#define FSP_MBX2_FHDR2_REG 0x0bC
+
+/* Data areas: we can only write to host data, and read from FSP data
+ *
+ * Each area is 0x140 bytes long
+ */
+#define FSP_MBX1_HDATA_AREA 0x100
+#define FSP_MBX1_FDATA_AREA 0x200
+#define FSP_MBX2_HDATA_AREA 0x300
+#define FSP_MBX2_FDATA_AREA 0x400
+
+/* These are scratch registers */
+#define FSP_SCRATCH0_REG 0xe0
+#define FSP_SCRATCH1_REG 0xe4
+#define FSP_SCRATCH2_REG 0xe8
+#define FSP_SCRATCH3_REG 0xec
+
+/* This is what the cmd_sub_mod will have for FSP_MCLASS_RR_EVENT */
+#define FSP_RESET_START 0x1
+#define FSP_RELOAD_COMPLETE 0x2
+
+/*
+ * Message classes
+ */
+
+/* The FSP_MCLASS_RR_EVENT is a special message class that doesn't
+ * participate in mbox event related activities. Its relevant only
+ * for hypervisor internal use. So, handle it specially for command
+ * class extraction too.
+ */
+#define FSP_MCLASS_RR_EVENT 0xaa /* see FSP_R/R defines above */
+#define FSP_MCLASS_FIRST 0xce
+#define FSP_MCLASS_SERVICE 0xce
+#define FSP_MCLASS_IPL 0xcf
+#define FSP_MCLASS_PCTRL_MSG 0xd0
+#define FSP_MCLASS_PCTRL_ABORTS 0xd1
+#define FSP_MCLASS_ERR_LOG 0xd2
+#define FSP_MCLASS_CODE_UPDATE 0xd3
+#define FSP_MCLASS_FETCH_SPDATA 0xd4
+#define FSP_MCLASS_FETCH_HVDATA 0xd5
+#define FSP_MCLASS_NVRAM 0xd6
+#define FSP_MCLASS_MBOX_SURV 0xd7
+#define FSP_MCLASS_RTC 0xd8
+#define FSP_MCLASS_SMART_CHIP 0xd9
+#define FSP_MCLASS_INDICATOR 0xda
+#define FSP_MCLASS_HMC_INTFMSG 0xe0
+#define FSP_MCLASS_HMC_VT 0xe1
+#define FSP_MCLASS_HMC_BUFFERS 0xe2
+#define FSP_MCLASS_SHARK 0xe3
+#define FSP_MCLASS_MEMORY_ERR 0xe4
+#define FSP_MCLASS_CUOD_EVENT 0xe5
+#define FSP_MCLASS_HW_MAINT 0xe6
+#define FSP_MCLASS_VIO 0xe7
+#define FSP_MCLASS_SRC_MSG 0xe8
+#define FSP_MCLASS_DATA_COPY 0xe9
+#define FSP_MCLASS_TONE 0xea
+#define FSP_MCLASS_VIRTUAL_NVRAM 0xeb
+#define FSP_MCLASS_TORRENT 0xec
+#define FSP_MCLASS_NODE_PDOWN 0xed
+#define FSP_MCLASS_DIAG 0xee
+#define FSP_MCLASS_PCIE_LINK_TOPO 0xef
+#define FSP_MCLASS_OCC 0xf0
+#define FSP_MCLASS_LAST 0xf0
+
+/*
+ * Commands are provided in rxxyyzz form where:
+ *
+ * - r is 0: no response or 1: response expected
+ * - xx is class
+ * - yy is subcommand
+ * - zz is mod
+ *
+ * WARNING: We only set the r bit for HV->FSP commands. In the
+ * long run, we want to remove the use of that bit and instead
+ * have a table of all commands in the FSP driver indicating
+ * which ones take a response...
+ */
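+
+/* For example, FSP_CMD_QUERY_SPARM (0x1ce1200) below decomposes as
+ * r = 1 (response expected), class = 0xce, subcommand = 0x12 and
+ * mod = 0x00.
+ */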
+
+/*
+ * Class 0xCF
+ */
+#define FSP_CMD_OPL 0x0cf7100 /* HV->FSP: Operational Load Compl. */
+#define FSP_CMD_HV_STATE_CHG 0x0cf0200 /* FSP->HV: Request HV state change */
+#define FSP_RSP_HV_STATE_CHG 0x0cf8200
+#define FSP_CMD_SP_NEW_ROLE 0x0cf0700 /* FSP->HV: FSP assuming a new role */
+#define FSP_RSP_SP_NEW_ROLE 0x0cf8700
+#define FSP_CMD_SP_RELOAD_COMP 0x0cf0102 /* FSP->HV: FSP reload complete */
+
+
+/*
+ * Class 0xCE
+ */
+#define FSP_CMD_ACK_DUMP 0x1ce0200 /* HV->FSP: Dump ack */
+#define FSP_CMD_HYP_MDST_TABLE 0x1ce2600 /* HV->FSP: Sapphire MDST table */
+#define FSP_CMD_CONTINUE_IPL 0x0ce7000 /* FSP->HV: HV has control */
+#define FSP_RSP_SYS_DUMP_OLD 0x0ce7800 /* FSP->HV: Sys Dump Available */
+#define FSP_RSP_SYS_DUMP 0x0ce7802 /* FSP->HV: Sys Dump Available */
+#define FSP_RSP_RES_DUMP 0x0ce7807 /* FSP->HV: Resource Dump Available */
+#define FSP_CMD_CONTINUE_ACK 0x0ce5700 /* HV->FSP: HV acks CONTINUE IPL */
+#define FSP_CMD_HV_FUNCTNAL 0x1ce5707 /* HV->FSP: Set HV functional state */
+#define FSP_CMD_FSP_FUNCTNAL 0x0ce5708 /* FSP->HV: FSP functional state */
+#define FSP_CMD_HV_QUERY_CAPS 0x1ce0400 /* HV->FSP: Query capabilities */
+#define FSP_RSP_HV_QUERY_CAPS 0x1ce8400
+#define FSP_CMD_SP_QUERY_CAPS 0x0ce0501 /* FSP->HV */
+#define FSP_RSP_SP_QUERY_CAPS 0x0ce8500
+#define FSP_CMD_QUERY_SPARM 0x1ce1200 /* HV->FSP: System parameter query */
+#define FSP_RSP_QUERY_SPARM 0x0ce9200 /* FSP->HV: System parameter resp */
+#define FSP_CMD_SET_SPARM_1 0x1ce1301 /* HV->FSP: Set system parameter */
+#define FSP_CMD_SET_SPARM_2 0x1ce1302 /* HV->FSP: Set system parameter TCE */
+#define FSP_RSP_SET_SPARM 0x0ce9300 /* FSP->HV: Set system parameter resp */
+#define FSP_CMD_SP_SPARM_UPD_0 0x0ce1600 /* FSP->HV: Sysparm updated no data */
+#define FSP_CMD_SP_SPARM_UPD_1 0x0ce1601 /* FSP->HV: Sysparm updated data */
+#define FSP_CMD_POWERDOWN_NORM 0x1ce4d00 /* HV->FSP: Normal power down */
+#define FSP_CMD_POWERDOWN_QUICK 0x1ce4d01 /* HV->FSP: Quick power down */
+#define FSP_CMD_POWERDOWN_PCIRS 0x1ce4d02 /* HV->FSP: PCI cfg reset power dwn */
+#define FSP_CMD_REBOOT 0x1ce4e00 /* HV->FSP: Standard IPL */
+#define FSP_CMD_DEEP_REBOOT 0x1ce4e04 /* HV->FSP: Deep IPL */
+#define FSP_CMD_PANELSTATUS 0x0ce5c00 /* FSP->HV */
+#define FSP_CMD_PANELSTATUS_EX1 0x0ce5c02 /* FSP->HV */
+#define FSP_CMD_PANELSTATUS_EX2 0x0ce5c03 /* FSP->HV */
+#define FSP_CMD_ERRLOG_PHYP_ACK 0x1ce0800 /* HV->FSP */
+#define FSP_RSP_ERRLOG_PHYP_ACK 0x0ce8800 /* FSP->HV */
+#define FSP_CMD_ERRLOG_GET_PLID 0x0ce0900 /* FSP->HV: Get PLID */
+#define FSP_RSP_ERRLOG_GET_PLID 0x0ce8900 /* HV->FSP */
+#define FSP_CMD_GET_IPL_SIDE 0x1ce0600 /* HV->FSP: Get IPL side and speed */
+#define FSP_CMD_SET_IPL_SIDE 0x1ce0780 /* HV->FSP: Set next IPL side */
+#define FSP_CMD_PCI_POWER_CONF 0x1ce1b00 /* HV->FSP: Send PCIe list to FSP */
+
+/*
+ * Class 0xD2
+ */
+#define FSP_CMD_CREATE_ERRLOG 0x1d21000 /* HV->FSP */
+#define FSP_RSP_CREATE_ERRLOG 0x0d29000 /* FSP->HV */
+#define FSP_CMD_ERRLOG_NOTIFICATION 0x0d25a00 /* FSP->HV */
+#define FSP_RSP_ERRLOG_NOTIFICATION 0x0d2da00 /* HV->FSP */
+#define FSP_RSP_ELOG_NOTIFICATION_ERROR 0x1d2dafe /* HV->FSP */
+#define FSP_CMD_FSP_DUMP_INIT 0x1d21200 /* HV->FSP: FSP dump init */
+
+/*
+ * Class 0xD0
+ */
+#define FSP_CMD_SPCN_PASSTHRU 0x1d05400 /* HV->FSP */
+#define FSP_RSP_SPCN_PASSTHRU 0x0d0d400 /* FSP->HV */
+
+/*
+ * Class 0xD3
+ */
+#define FSP_CMD_FLASH_START 0x01d30101 /* HV->FSP: Code update start */
+#define FSP_CMD_FLASH_COMPLETE 0x01d30201 /* HV->FSP: Code update complete */
+#define FSP_CMD_FLASH_ABORT 0x01d302ff /* HV->FSP: Code update abort */
+#define FSP_CMD_FLASH_WRITE 0x01d30300 /* HV->FSP: Write LID */
+#define FSP_CMD_FLASH_DEL 0x01d30500 /* HV->FSP: Delete LID */
+#define FSP_CMD_FLASH_NORMAL 0x01d30401 /* HV->FSP: Commit (T -> P) */
+#define FSP_CMD_FLASH_REMOVE 0x01d30402 /* HV->FSP: Reject (P -> T) */
+#define FSP_CMD_FLASH_SWAP 0x01d30403 /* HV->FSP: Swap */
+#define FSP_CMD_FLASH_OUTC 0x00d30601 /* FSP->HV: Out of band commit */
+#define FSP_CMD_FLASH_OUTR 0x00d30602 /* FSP->HV: Out of band reject */
+#define FSP_CMD_FLASH_OUTS 0x00d30603 /* FSP->HV: Out of band swap */
+#define FSP_CMD_FLASH_OUT_RSP 0x00d38600 /* HV->FSP: Out of band Resp */
+#define FSP_CMD_FLASH_CACHE 0x00d30700 /* FSP->HV: Update LID cache */
+#define FSP_CMD_FLASH_CACHE_RSP 0x00d38700 /* HV->FSP: Update LID cache Resp */
+
+/*
+ * Class 0xD4
+ */
+#define FSP_CMD_FETCH_SP_DATA 0x1d40101 /* HV->FSP: Fetch & DMA data */
+#define FSP_CMD_WRITE_SP_DATA 0x1d40201 /* HV->FSP: Write & DMA data */
+
+/* Data set IDs for SP data commands */
+#define FSP_DATASET_SP_DUMP 0x01
+#define FSP_DATASET_HW_DUMP 0x02
+#define FSP_DATASET_ERRLOG 0x03 /* error log entry */
+#define FSP_DATASET_MASTER_LID 0x04
+#define FSP_DATASET_NONSP_LID 0x05
+#define FSP_DATASET_ELID_RDATA 0x06
+#define FSP_DATASET_BLADE_PARM 0x07
+#define FSP_DATASET_LOC_PORTMAP 0x08
+#define FSP_DATASET_SYSIND_CAP 0x09
+#define FSP_DATASET_FSP_RSRCDMP 0x0a
+#define FSP_DATASET_HBRT_BLOB 0x0b
+
+/* Adjustment to get T side LIDs */
+#define ADJUST_T_SIDE_LID_NO 0x8000
+
+/*
+ * Class 0xD5
+ */
+#define FSP_CMD_ALLOC_INBOUND 0x0d50400 /* FSP->HV: Allocate inbound buf. */
+#define FSP_RSP_ALLOC_INBOUND 0x0d58400
+
+/*
+ * Class 0xD7
+ */
+#define FSP_CMD_SURV_HBEAT 0x1d70000 /* ? */
+#define FSP_CMD_SURV_ACK 0x0d78000 /* ? */
+
+/*
+ * Class 0xD8
+ */
+#define FSP_CMD_READ_TOD 0x1d82000 /* HV->FSP */
+#define FSP_CMD_READ_TOD_EXT 0x1d82001 /* HV->FSP */
+#define FSP_CMD_WRITE_TOD 0x1d82100 /* HV->FSP */
+#define FSP_CMD_WRITE_TOD_EXT 0x1d82101 /* HV->FSP */
+
+/*
+ * Class 0xDA
+ */
+#define FSP_CMD_GET_LED_LIST 0x00da1101 /* Location code information structure */
+#define FSP_RSP_GET_LED_LIST 0x00da9100
+#define FSP_CMD_RET_LED_BUFFER 0x00da1102 /* Location code buffer information */
+#define FSP_RSP_RET_LED_BUFFER 0x00da9100
+#define FSP_CMD_GET_LED_STATE 0x00da1103 /* Retrieve Indicator State */
+#define FSP_RSP_GET_LED_STATE 0x00da9100
+#define FSP_CMD_SET_LED_STATE 0x00da1104 /* Set Service Indicator State */
+#define FSP_RSP_SET_LED_STATE 0x00da9100
+
+/*
+ * Class 0xE0
+ *
+ * HACK ALERT: We mark E00A01 (associate serial port) as not needing
+ * a response. We need to do that because the FSP will send as a result
+ * an Open Virtual Serial of the same class *and* expect a reply before
+ * it will respond to associate serial port. That breaks our logic of
+ * supporting only one cmd/resp outstanding per class.
+ */
+#define FSP_CMD_HMC_INTF_QUERY 0x0e00100 /* FSP->HV */
+#define FSP_RSP_HMC_INTF_QUERY 0x0e08100 /* HV->FSP */
+#define FSP_CMD_ASSOC_SERIAL 0x0e00a01 /* HV->FSP: Associate with a port */
+#define FSP_RSP_ASSOC_SERIAL 0x0e08a00 /* FSP->HV */
+#define FSP_CMD_UNASSOC_SERIAL 0x0e00b01 /* HV->FSP: Deassociate */
+#define FSP_RSP_UNASSOC_SERIAL 0x0e08b00 /* FSP->HV */
+#define FSP_CMD_OPEN_VSERIAL 0x0e00601 /* FSP->HV: Open serial session */
+#define FSP_RSP_OPEN_VSERIAL 0x0e08600 /* HV->FSP */
+#define FSP_CMD_CLOSE_VSERIAL 0x0e00701 /* FSP->HV: Close serial session */
+#define FSP_RSP_CLOSE_VSERIAL 0x0e08700 /* HV->FSP */
+#define FSP_CMD_CLOSE_HMC_INTF 0x0e00300 /* FSP->HV: Close HMC interface */
+#define FSP_RSP_CLOSE_HMC_INTF 0x0e08300 /* HV->FSP */
+
+/*
+ * Class E1
+ */
+#define FSP_CMD_VSERIAL_IN 0x0e10100 /* FSP->HV */
+#define FSP_CMD_VSERIAL_OUT 0x0e10200 /* HV->FSP */
+
+/*
+ * Class E8
+ */
+#define FSP_CMD_READ_SRC 0x1e84a40 /* HV->FSP */
+#define FSP_CMD_DISP_SRC_INDIR 0x1e84a41 /* HV->FSP */
+#define FSP_CMD_DISP_SRC_DIRECT 0x1e84a42 /* HV->FSP */
+#define FSP_CMD_CLEAR_SRC 0x1e84b00 /* HV->FSP */
+#define FSP_CMD_DIS_SRC_ECHO 0x1e87600 /* HV->FSP */
+
+/*
+ * Class EB
+ */
+#define FSP_CMD_GET_VNVRAM_SIZE 0x01eb0100 /* HV->FSP */
+#define FSP_CMD_OPEN_VNVRAM 0x01eb0200 /* HV->FSP */
+#define FSP_CMD_READ_VNVRAM 0x01eb0300 /* HV->FSP */
+#define FSP_CMD_WRITE_VNVRAM 0x01eb0400 /* HV->FSP */
+#define FSP_CMD_GET_VNV_STATS 0x00eb0500 /* FSP->HV */
+#define FSP_RSP_GET_VNV_STATS 0x00eb8500
+#define FSP_CMD_FREE_VNV_STATS 0x00eb0600 /* FSP->HV */
+#define FSP_RSP_FREE_VNV_STATS 0x00eb8600
+
+/*
+ * Class 0xEE
+ */
+#define FSP_RSP_DIAG_LINK_ERROR 0x00ee1100 /* FSP->HV */
+#define FSP_RSP_DIAG_ACK_TIMEOUT 0x00ee0000 /* FSP->HV */
+
+/*
+ * Class F0
+ */
+#define FSP_CMD_LOAD_OCC 0x00f00100 /* FSP->HV */
+#define FSP_RSP_LOAD_OCC 0x00f08100 /* HV->FSP */
+#define FSP_CMD_LOAD_OCC_STAT 0x01f00300 /* HV->FSP */
+#define FSP_CMD_RESET_OCC 0x00f00200 /* FSP->HV */
+#define FSP_RSP_RESET_OCC 0x00f08200 /* HV->FSP */
+#define FSP_CMD_RESET_OCC_STAT 0x01f00400 /* HV->FSP */
+
+/*
+ * Class E4
+ */
+#define FSP_CMD_MEM_RES_CE 0x00e40300 /* FSP->HV: Memory resilience CE */
+#define FSP_CMD_MEM_RES_UE 0x00e40301 /* FSP->HV: Memory resilience UE */
+#define FSP_CMD_MEM_RES_UE_SCRB 0x00e40302 /* FSP->HV: UE detected by scrub */
+#define FSP_RSP_MEM_RES 0x00e48300 /* HV->FSP */
+#define FSP_CMD_MEM_DYN_DEALLOC 0x00e40500 /* FSP->HV: Dynamic mem dealloc */
+#define FSP_RSP_MEM_DYN_DEALLOC 0x00e48500 /* HV->FSP */
+
+/*
+ * Functions exposed to the rest of skiboot
+ */
+
+/* An FSP message */
+
+enum fsp_msg_state {
+ fsp_msg_unused = 0,
+ fsp_msg_queued,
+ fsp_msg_sent,
+ fsp_msg_wresp,
+ fsp_msg_done,
+ fsp_msg_timeout,
+ fsp_msg_incoming,
+ fsp_msg_response,
+ fsp_msg_cancelled,
+};
+
+struct fsp_msg {
+ /*
+ * User fields. Don't populate word0.seq (upper 16 bits), this
+ * will be done by fsp_queue_msg()
+ */
+ u8 dlen; /* not including word0/word1 */
+ u32 word0; /* seq << 16 | cmd */
+ u32 word1; /* mod << 8 | sub */
+ union {
+ u32 words[14];
+ u8 bytes[56];
+ } data;
+
+ /* Completion function. Called with no lock held */
+ void (*complete)(struct fsp_msg *msg);
+ void *user_data;
+
+ /*
+ * Driver updated fields
+ */
+
+ /* Current msg state */
+ enum fsp_msg_state state;
+
+ /* Set if the message expects a response */
+ bool response;
+
+ /* Whether the message is a 'sync' message and, if so, whether we
+ * have to 'free' the message once sent; that is, whether we need
+ * to wait on this message until it gets sent to the FSP. This
+ * indication is only useful when we experience a PSI link loss or
+ * an FSP r/r and need to queue up messages until the PSI link /
+ * FSP comes back up again.
+ */
+ bool sync_msg;
+ bool auto_free;
+
+ /* Response will be filled in by the driver when a response is received */
+ struct fsp_msg *resp;
+
+ /* Internal queuing */
+ struct list_node link;
+};
+
+/* This checks if a message is still "in progress" in the FSP driver */
+static inline bool fsp_msg_busy(struct fsp_msg *msg)
+{
+ switch(msg->state) {
+ case fsp_msg_unused:
+ case fsp_msg_done:
+ case fsp_msg_timeout:
+ case fsp_msg_response: /* A response is considered a completed msg */
+ return false;
+ default:
+ break;
+ }
+ return true;
+}
+
+/* Initialize the FSP mailbox driver */
+extern void fsp_init(void);
+
+/* Perform the OPL sequence */
+extern void fsp_opl(void);
+
+/* Check if system has an FSP */
+extern bool fsp_present(void);
+
+/* Allocate and populate an fsp_msg structure
+ *
+ * WARNING: Do _NOT_ use free() on an fsp_msg, use fsp_freemsg()
+ * instead as we will eventually use pre-allocated message pools
+ */
+extern struct fsp_msg *fsp_allocmsg(bool alloc_response);
+extern struct fsp_msg *fsp_mkmsg(u32 cmd_sub_mod, u8 add_words, ...);
+
+/* Populate a pre-allocated msg */
+extern void fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod, u8 add_words, ...);
+
+/* Free a message
+ *
+ * WARNING: This will also free an attached response if any
+ */
+extern void fsp_freemsg(struct fsp_msg *msg);
+
+/* Free a message and not the attached reply */
+extern void __fsp_freemsg(struct fsp_msg *msg);
+
+/* Cancel a message from the msg queue
+ *
+ * WARNING: * This is intended for use only in the FSP r/r scenario.
+ * * This will also free an attached response if any
+ */
+extern void fsp_cancelmsg(struct fsp_msg *msg);
+
+/* Enqueue it in the appropriate FSP queue
+ *
+ * NOTE: This supports being called with the FSP lock already
+ * held. This is the only function in this module that does so
+ * and is meant to be used that way for sending serial "poke"
+ * commands to the FSP.
+ */
+extern int fsp_queue_msg(struct fsp_msg *msg,
+ void (*comp)(struct fsp_msg *msg));
+
+/* Synchronously send a command. If there's a response, the status is
+ * returned as a positive number. A negative result means an error
+ * sending the message.
+ *
+ * If autofree is set, the message and the reply (if any) are freed
+ * after extracting the status. If not set, you are responsible for
+ * freeing both the message and an eventual response
+ *
+ * NOTE: This will call fsp_queue_msg(msg, NULL), hence clearing the
+ * completion field of the message. No synchronous message is expected
+ * to utilize asynchronous completions.
+ */
+extern int fsp_sync_msg(struct fsp_msg *msg, bool autofree);
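+
+/* Usage sketch (command choice is illustrative):
+ *
+ *   struct fsp_msg *msg = fsp_mkmsg(FSP_CMD_REBOOT, 0);
+ *   if (msg) {
+ *           int status = fsp_sync_msg(msg, true);
+ *           ... status < 0 is a send error, otherwise FSP status ...
+ *   }
+ */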
+
+/* Process FSP mailbox activity */
+extern void fsp_poll(void);
+
+/* Handle FSP interrupts */
+extern void fsp_interrupt(void);
+
+/* An FSP client is interested in messages for a given class */
+struct fsp_client {
+ /* Return true to "own" the message (you can free it) */
+ bool (*message)(u32 cmd_sub_mod, struct fsp_msg *msg);
+ struct list_node link;
+};
+
+/* WARNING: Command class FSP_MCLASS_IPL is aliased to FSP_MCLASS_SERVICE,
+ * thus a client of one will get both types of messages.
+ *
+ * WARNING: Client register/unregister takes *NO* lock. These are expected
+ * to be called early at boot before CPUs are brought up and before
+ * fsp_poll() can race. The client callback is called with no lock held.
+ */
+extern void fsp_register_client(struct fsp_client *client, u8 msgclass);
+extern void fsp_unregister_client(struct fsp_client *client, u8 msgclass);
+
+/* FSP TCE map/unmap functions */
+extern void fsp_tce_map(u32 offset, void *addr, u32 size);
+extern void fsp_tce_unmap(u32 offset, u32 size);
+extern void *fsp_inbound_buf_from_tce(u32 tce_token);
+
+/* Data fetch helper */
+extern uint32_t fsp_adjust_lid_side(uint32_t lid_no);
+extern int fsp_fetch_data(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length);
+extern int fsp_fetch_data_queue(uint8_t flags, uint16_t id, uint32_t sub_id,
+ uint32_t offset, void *buffer, size_t *length,
+ void (*comp)(struct fsp_msg *msg));
+
+/* FSP console stuff */
+extern void fsp_console_preinit(void);
+extern void fsp_console_init(void);
+extern void fsp_console_add_nodes(void);
+extern void fsp_console_select_stdout(void);
+extern void fsp_console_reset(void);
+extern void fsp_console_poll(void *);
+
+/* Mark FSP lock */
+extern void fsp_used_by_console(void);
+
+/* NVRAM */
+extern int fsp_nvram_info(uint32_t *total_size);
+extern int fsp_nvram_start_read(void *dst, uint32_t src, uint32_t len);
+extern int fsp_nvram_write(uint32_t offset, void *src, uint32_t size);
+extern void fsp_nvram_wait_open(void);
+
+/* RTC */
+extern void fsp_rtc_init(void);
+extern int fsp_rtc_get_cached_tod(uint32_t *year_month_day,
+ uint64_t *hour_minute_second_millisecond);
+
+/* ELOG */
+extern void fsp_elog_read_init(void);
+extern void fsp_elog_write_init(void);
+
+/* Code update */
+extern void fsp_code_update_init(void);
+extern void fsp_code_update_wait_vpd(bool is_boot);
+
+/* Dump */
+extern void fsp_dump_init(void);
+extern void fsp_fips_dump_notify(uint32_t dump_id, uint32_t dump_len);
+
+/* MDST table */
+extern void fsp_mdst_table_init(void);
+
+/* This can be set by fsp_opal_update_flash() so that it gets
+ * called just before we reboot or shut down the machine.
+ */
+extern int (*fsp_flash_term_hook)(void);
+
+/* Surveillance */
+extern void fsp_init_surveillance(void);
+extern void fsp_surv_query(void);
+
+/* Reset/Reload */
+extern void fsp_reinit_fsp(void);
+extern void fsp_trigger_reset(void);
+
+/* FSP memory errors */
+extern void fsp_memory_err_init(void);
+
+/* Sensor */
+extern void fsp_init_sensor(void);
+
+/* Diagnostic */
+extern void fsp_init_diag(void);
+
+/* LED */
+extern void fsp_led_init(void);
+extern void fsp_get_led_list(struct fsp_msg *msg);
+extern void fsp_free_led_list_buf(struct fsp_msg *msg);
+extern void fsp_get_led_state(struct fsp_msg *msg);
+extern void fsp_set_led_state(struct fsp_msg *msg);
+
+#endif /* __FSP_H */
diff --git a/include/gx.h b/include/gx.h
new file mode 100644
index 0000000..f633892
--- /dev/null
+++ b/include/gx.h
@@ -0,0 +1,59 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Definitions relative to the P7 and P7+ GX controller
+ */
+#ifndef __GX_H
+#define __GX_H
+
+#include <bitutils.h>
+
+/* P7 GX Mode 1 register (contains PSI BUID) */
+#define GX_P7_MODE1_REG 0x0201180A
+#define GX_P7_MODE1_PSI_BUID_MASK PPC_BITMASK(18,26)
+#define GX_P7_MODE1_PSI_BUID_LSH PPC_BITLSHIFT(26)
+#define GX_P7_MODE1_PSI_BUID_DISABLE PPC_BIT(27)
+
+/* P7+ GX Mode 4 register (PSI and NX BUIDs ) */
+#define GX_P7P_MODE4_REG 0x02011811
+#define GX_P7P_MODE4_ENABLE_NX_BUID PPC_BIT(0)
+#define GX_P7P_MODE4_NX_BUID_BASE_MASK PPC_BITMASK(1,9)
+#define GX_P7P_MODE4_NX_BUID_BASE_LSH PPC_BITLSHIFT(9)
+#define GX_P7P_MODE4_NX_BUID_MASK_MASK PPC_BITMASK(10,18)
+#define GX_P7P_MODE4_NX_BUID_MASK_LSH PPC_BITLSHIFT(18)
+#define GX_P7P_MODE4_PSI_BUID_MASK PPC_BITMASK(19,27)
+#define GX_P7P_MODE4_PSI_BUID_LSH PPC_BITLSHIFT(27)
+#define GX_P7P_MODE4_PSI_BUID_DISABLE PPC_BIT(28)
+
+/* P7 GX TCE BAR and mask */
+#define GX_P7_GX0_TCE_BAR 0x02011845
+#define GX_P7_TCE_BAR_ADDR_MASK PPC_BITMASK(0,25)
+#define GX_P7_TCE_BAR_ADDR_LSH PPC_BITLSHIFT(25)
+#define GX_P7_TCE_BAR_ADDR_SHIFT PPC_BITLSHIFT(43)
+#define GX_P7_TCE_BAR_ENABLE PPC_BIT(26)
+#define GX_P7_GX0_TCE_MASK 0x0201184B
+#define GX_P7_TCE_MASK_MASK PPC_BITMASK(0,25)
+#define GX_P7_TCE_MASK_LSH PPC_BITLSHIFT(25)
+#define GX_P7_GX1_TCE_BAR 0x02011885
+#define GX_P7_GX1_TCE_MASK 0x0201188B
+
+
+extern int gx_configure_psi_buid(uint32_t chip, uint32_t buid);
+extern int gx_configure_tce_bar(uint32_t chip, uint32_t gx, uint64_t addr,
+ uint64_t size);
+
+#endif /* __GX_H */
diff --git a/include/hostservices.h b/include/hostservices.h
new file mode 100644
index 0000000..23e2a83
--- /dev/null
+++ b/include/hostservices.h
@@ -0,0 +1,36 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HOSTSERVICES_H
+#define __HOSTSERVICES_H
+
+bool hservices_init(void);
+
+int host_services_occ_load(void);
+int host_services_occ_start(void);
+void host_services_occ_base_setup(void);
+
+/* No LID can be larger than 16M, but OCC lid is less than 1 MB */
+
+#define HBRT_LOAD_LID_SIZE 0x100000 /* 1MB */
+
+/* TODO: Detect OCC lid size at runtime */
+
+/* Homer and OCC area size */
+#define HOMER_IMAGE_SIZE 0x400000 /* 4MB per-chip */
+#define OCC_COMMON_SIZE 0x800000 /* 8MB */
+
+#endif /* __HOSTSERVICES_H */
diff --git a/include/interrupts.h b/include/interrupts.h
new file mode 100644
index 0000000..9239b86
--- /dev/null
+++ b/include/interrupts.h
@@ -0,0 +1,254 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INTERRUPTS_H
+#define __INTERRUPTS_H
+
+#include <stdint.h>
+#include <ccan/list/list.h>
+
+/*
+ * Note about interrupt numbers on P7/P7+
+ * ======================================
+ *
+ * The form of an interrupt number in the system on P7/P7+ is as follow:
+ *
+ * | Node | T| Chip|GX| BUID | Level |
+ * |--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|--|
+ *
+ * Where:
+ *
+ * - Node : The 3-bit node number
+ * - T : 1 for a Torrent chip, 0 otherwise
+ * - Chip : 2-bit chip number in a node
+ * - GX : GX bus identifier
+ * - BUID : Bus identifier (*)
+ * - Level : Interrupt number
+ *
+ * (*) The BUID/Level distinction is mostly historical, interrupt
+ * controllers such as the ICS in the PHBs "use" some of the
+ * low BUID bits as an extension to the interrupt number
+ *
+ * The NodeID and ChipID together form a 5-bit Processor Chip ID as
+ * found in the PIR or in the SPIRA data structures (without the T bit)
+ *
+ * PSI interrupt numbering scheme:
+ * -------------------------------
+ *
+ * This is tentatively deduced from stuff I found in some SCOM regs
+ * and in the BookIV. The PSIHB can be used to specify the 9-bit BUID,
+ * the Level is always 0. The doc also says that it prepends the 6-bit
+ * PowerBus chipID (Node + T + Chip). I *assume* that it also prepends
+ * a 0 in place of the GX bit.
+ *
+ * OPAL seems to be arbitrarily using a BUID value of 0x3; I shall do
+ * the same "just in case" :-)
+ *
+ * NOTE: From grep'ing around the giant SCOM file for "BUID", I found
+ * what looks like a register in the GX controller (Mode1
+ * register) where the PSI BUID can be stored as well. From
+ * looking around with the FSP getscom command, it appears
+ * that both pHyp and OPAL set this consistently to the same
+ * value that appears in the PHB configuration.
+ *
+ * => This is confirmed. The NX needs a similar configuration, this
+ * tells the GX controller not to forward transactions for these
+ * BUIDs down the GX bus.
+ *
+ * PCI interrupt numbering scheme:
+ * -------------------------------
+ *
+ * See IOCs
+ *
+ * NX interrupt numbering scheme (p7+):
+ * ------------------------------------
+ *
+ * TBD
+ *
+ *
+ * Additional note about routing of interrupts in P7 and P7+
+ * =========================================================
+ *
+ * There are two on-chip sources of interrupts on these chips that
+ * need special treatment: the PSI interrupt and the NX interrupts.
+ *
+ * The problem is that they use the same BUID space as the IO chips
+ * connected to the GX bus, so the GX controller needs to be told
+ * about these BUIDs in order to avoid forwarding them down the GX
+ * link (and possibly choking due to the lack of reply).
+ *
+ * The bad news is that it's all undocumented. The good news is that
+ * I found the info after chatting with Bill Daly (HW design) and
+ * looking at the SCOM register maps.
+ *
+ * The way to set that up differs between P7 and P7+:
+ *
+ * - On P7, it's in the GX_MODE1 register at SCOM 0x0201180A, which
+ * among other things, contains those bits:
+ *
+ * 18:26 PSI_BUID: BUID to be used to indicate the interrupt is
+ * for the PSI
+ * 27 DISABLE_PSI_BUID: set to 1 to disable the buid reservation
+ * for PSI
+ *
+ * So one must write the 9-bit BUID (without the top chipID) of the
+ * PSI interrupt in there and clear the disable bit.
+ *
+ * - On P7+ it's in the GX_MODE4 register at SCOM 0x02011811
+ *
+ * 0 ENABLE_NX_BUID: set to 1 to enable the buid reservation for nx
+ * 1:9 NX_BUID_BASE: BUID BASE to be used to indicate the interrupt
+ * is for the nx
+ * 10:18 NX_BUID_MASK: BUID mask for the nx buid base
+ * 19:27 PSI_BUID: BUID to be used to indicate the interrupt is for
+ * the PSI
+ * 28 DISABLE_PSI_BUID: set to 1 to disable the buid reservation
+ * for PSI
+ *
+ * Note: The NX_BUID_MASK should have bits set to 1 that are relevant for
+ * the comparison to NX_BUID_BASE, i.e. 4 interrupts means a mask
+ * value of b'111111100
+ */
+
+#define P7_PSI_IRQ_BUID 0x3 /* 9-bit BUID for the PSI interrupts */
+
+/* Extract individual components of an IRQ number */
+#define P7_IRQ_BUID(irq) (((irq) >> 4) & 0x1ff)
+#define P7_IRQ_GXID(irq) (((irq) >> 13) & 0x1)
+#define P7_IRQ_CHIP(irq) (((irq) >> 14) & 0x3)
+#define P7_IRQ_TBIT(irq) (((irq) >> 16) & 0x1)
+#define P7_IRQ_NODE(irq) (((irq) >> 17) & 0x7)
+
+/* Extract the "full BUID" (extension + BUID) */
+#define P7_IRQ_FBUID(irq) (((irq) >> 4) & 0xffff)
+
+/* BUID Extension (GX + CHIP + T + NODE) */
+#define P7_IRQ_BEXT(irq) (((irq) >> 13) & 0x7f)
+
+/* Strip extension from BUID */
+#define P7_BUID_BASE(buid) ((buid) & 0x1ff)
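+
+/*
+ * Worked example (illustrative only, values hypothetical): an
+ * interrupt from node 1, chip 2, BUID 0x3, level 5 is
+ *
+ *   irq = (1 << 17) | (2 << 14) | (0x3 << 4) | 5 = 0x28035
+ *
+ * and the macros above decompose it as P7_IRQ_NODE(irq) == 1,
+ * P7_IRQ_CHIP(irq) == 2, P7_IRQ_BUID(irq) == 0x3 and
+ * P7_IRQ_FBUID(irq) == 0x2803.
+ */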
+
+
+/* Note about interrupt numbers on P8
+ * ==================================
+ *
+ * On P8 the interrupt numbers are just a flat 19-bit space;
+ * there is no BUID or similar.
+ *
+ * However, various units tend to require blocks of interrupts
+ * that are naturally power-of-two aligned.
+ *
+ * Our P8 interrupt map thus consists of dividing the chip space
+ * into 4 "blocks" of 2048 interrupts. Block 0 is for random chip
+ * interrupt sources (NX, PSI, OCC, ...) and keeps sources 0..15
+ * clear to avoid conflicts with IPIs etc. Blocks 1..3 are assigned
+ * to PHBs 0..2 respectively.
+ *
+ * That gives us an interrupt number made of:
+ * 18 13 12 11 10 0
+ * | | | | | |
+ * +--------------------+------+-----------------------------+
+ * | Chip# | PHB# | IVE# |
+ * +--------------------+------+-----------------------------+
+ *
+ * We can thus support a max of 2^6 = 64 chips
+ *
+ * Each PHB supports 2K interrupt sources, which are shared by
+ * LSI and MSI. With the default configuration, MSI would use the
+ * range [0, 0x7f7] and LSI would use [0x7f8, 0x7ff]. The interrupt
+ * source should be combined with the IRSN to form the final
+ * hardware IRQ.
+ *
+ */
+
+#define P8_CHIP_IRQ_BASE(chip) ((chip) << 13)
+#define P8_CHIP_IRQ_BLOCK_BASE(chip, block) (P8_CHIP_IRQ_BASE(chip) \
+ | ((block) << 11))
+#define P8_IRQ_BLOCK_MISC 0
+#define P8_IRQ_BLOCK_PHB0 1
+#define P8_IRQ_BLOCK_PHB1 2
+#define P8_IRQ_BLOCK_PHB2 3
+
+#define P8_CHIP_IRQ_PHB_BASE(chip, phb) (P8_CHIP_IRQ_BLOCK_BASE(chip,\
+ (phb) + P8_IRQ_BLOCK_PHB0))
+
+#define P8_IRQ_TO_CHIP(irq) (((irq) >> 13) & 0x3f)
+#define P8_IRQ_TO_BLOCK(irq) (((irq) >> 11) & 0x03)
+#define P8_IRQ_TO_PHB(irq) (P8_IRQ_TO_BLOCK(irq) - \
+ P8_IRQ_BLOCK_PHB0)
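+
+/*
+ * Worked example (illustrative only): source 0x34 of PHB 1 on
+ * chip 2 maps to
+ *
+ *   irq = P8_CHIP_IRQ_PHB_BASE(2, 1) | 0x34 = 0x5034
+ *
+ * and indeed P8_IRQ_TO_CHIP(0x5034) == 2 and
+ * P8_IRQ_TO_PHB(0x5034) == 1.
+ */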
+
+/* Assignment of the "MISC" block:
+ * -------------------------------
+ *
+ * PSI interface has 6 interrupt sources:
+ *
+ * FSP, OCC, FSI, LPC, Local error, Host error
+ *
+ * and thus needs a block of 8
+ */
+#define P8_IRQ_MISC_PSI_BASE 0x10 /* 0x10..0x17 */
+
+/* These are handled by skiboot */
+#define P8_IRQ_PSI_SKIBOOT_BASE 0
+#define P8_IRQ_PSI_FSP 0
+#define P8_IRQ_PSI_OCC 1
+#define P8_IRQ_PSI_FSI 2
+#define P8_IRQ_PSI_LPC 3
+#define P8_IRQ_PSI_LOCAL_ERR 4
+#define P8_IRQ_PSI_LOCAL_COUNT 5
+#define P8_IRQ_PSI_ALL_COUNT 6
+
+/* These are passed onto Linux */
+#define P8_IRQ_PSI_LINUX_BASE 5
+#define P8_IRQ_PSI_HOST_ERR 5 /* Used for UART */
+#define P8_IRQ_PSI_LINUX_COUNT 1
+
+/* TBD: NX, AS, ...
+ */
+
+/*
+ * IRQ sources register themselves here. If an "interrupt" callback
+ * is provided, then all interrupts in that source will appear in
+ * 'opal-interrupts' and will be handled by us.
+ */
+struct irq_source_ops {
+ int64_t (*set_xive)(void *data, uint32_t isn, uint16_t server,
+ uint8_t priority);
+ int64_t (*get_xive)(void *data, uint32_t isn, uint16_t *server,
+ uint8_t *priority);
+ void (*interrupt)(void *data, uint32_t isn);
+};
+
+extern void register_irq_source(const struct irq_source_ops *ops, void *data,
+ uint32_t start, uint32_t count);
+extern void unregister_irq_source(uint32_t start, uint32_t count);
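+
+/*
+ * Registration sketch (hypothetical callbacks, for illustration
+ * only): a source owning 8 consecutive interrupts would do
+ * something like:
+ *
+ *   static const struct irq_source_ops my_irq_ops = {
+ *           .get_xive  = my_get_xive,
+ *           .set_xive  = my_set_xive,
+ *           .interrupt = my_interrupt,
+ *   };
+ *
+ *   register_irq_source(&my_irq_ops, my_data, base_isn, 8);
+ */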
+
+extern uint32_t get_psi_interrupt(uint32_t chip_id);
+
+struct dt_node;
+
+extern struct dt_node *add_ics_node(void);
+extern void add_opal_interrupts(void);
+extern uint32_t get_ics_phandle(void);
+
+struct cpu_thread;
+
+extern void reset_cpu_icp(void);
+extern void icp_send_eoi(uint32_t interrupt);
+extern void icp_prep_for_rvwinkle(void);
+extern void icp_kick_cpu(struct cpu_thread *cpu);
+
+extern void init_interrupts(void);
+
+#endif /* __INTERRUPTS_H */
diff --git a/include/io.h b/include/io.h
new file mode 100644
index 0000000..45a385e
--- /dev/null
+++ b/include/io.h
@@ -0,0 +1,175 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __IO_H
+#define __IO_H
+
+#ifndef __ASSEMBLY__
+
+#include <compiler.h>
+#include <stdint.h>
+#include <processor.h>
+#include <ccan/endian/endian.h>
+
+/*
+ * IO access functions
+ *
+ * __in_beXX() / __out_beXX() : non-byteswap, no barrier
+ * in_beXX() / out_beXX() : non-byteswap, barrier
+ * in_leXX() / out_leXX() : byteswap, barrier
+ */
+
+static inline uint8_t __in_8(const volatile uint8_t *addr)
+{
+ uint8_t val;
+ asm volatile("lbzcix %0,0,%1" :
+ "=r"(val) : "r"(addr), "m"(*addr) : "memory");
+ return val;
+}
+
+static inline uint8_t in_8(const volatile uint8_t *addr)
+{
+ sync();
+ return __in_8(addr);
+}
+
+static inline uint16_t __in_be16(const volatile uint16_t *addr)
+{
+ uint16_t val;
+ asm volatile("lhzcix %0,0,%1" :
+ "=r"(val) : "r"(addr), "m"(*addr) : "memory");
+ return val;
+}
+
+static inline uint16_t in_be16(const volatile uint16_t *addr)
+{
+ sync();
+ return __in_be16(addr);
+}
+
+static inline uint16_t in_le16(const volatile uint16_t *addr)
+{
+ return bswap_16(in_be16(addr));
+}
+
+static inline uint32_t __in_be32(const volatile uint32_t *addr)
+{
+ uint32_t val;
+ asm volatile("lwzcix %0,0,%1" :
+ "=r"(val) : "r"(addr), "m"(*addr) : "memory");
+ return val;
+}
+
+static inline uint32_t in_be32(const volatile uint32_t *addr)
+{
+ sync();
+ return __in_be32(addr);
+}
+
+static inline uint32_t in_le32(const volatile uint32_t *addr)
+{
+ return bswap_32(in_be32(addr));
+}
+
+static inline uint64_t __in_be64(const volatile uint64_t *addr)
+{
+ uint64_t val;
+ asm volatile("ldcix %0,0,%1" :
+ "=r"(val) : "r"(addr), "m"(*addr) : "memory");
+ return val;
+}
+
+static inline uint64_t in_be64(const volatile uint64_t *addr)
+{
+ sync();
+ return __in_be64(addr);
+}
+
+static inline uint64_t in_le64(const volatile uint64_t *addr)
+{
+ return bswap_64(in_be64(addr));
+}
+
+static inline void __out_8(volatile uint8_t *addr, uint8_t val)
+{
+ asm volatile("stbcix %0,0,%1"
+ : : "r"(val), "r"(addr), "m"(*addr) : "memory");
+}
+
+static inline void out_8(volatile uint8_t *addr, uint8_t val)
+{
+ sync();
+ __out_8(addr, val);
+}
+
+static inline void __out_be16(volatile uint16_t *addr, uint16_t val)
+{
+ asm volatile("sthcix %0,0,%1"
+ : : "r"(val), "r"(addr), "m"(*addr) : "memory");
+}
+
+static inline void out_be16(volatile uint16_t *addr, uint16_t val)
+{
+ sync();
+ __out_be16(addr, val);
+}
+
+static inline void out_le16(volatile uint16_t *addr, uint16_t val)
+{
+ out_be16(addr, bswap_16(val));
+}
+
+static inline void __out_be32(volatile uint32_t *addr, uint32_t val)
+{
+ asm volatile("stwcix %0,0,%1"
+ : : "r"(val), "r"(addr), "m"(*addr) : "memory");
+}
+
+static inline void out_be32(volatile uint32_t *addr, uint32_t val)
+{
+ sync();
+ __out_be32(addr, val);
+}
+
+static inline void out_le32(volatile uint32_t *addr, uint32_t val)
+{
+ out_be32(addr, bswap_32(val));
+}
+
+static inline void __out_be64(volatile uint64_t *addr, uint64_t val)
+{
+ asm volatile("stdcix %0,0,%1"
+ : : "r"(val), "r"(addr), "m"(*addr) : "memory");
+}
+
+static inline void out_be64(volatile uint64_t *addr, uint64_t val)
+{
+ sync();
+ __out_be64(addr, val);
+}
+
+static inline void out_le64(volatile uint64_t *addr, uint64_t val)
+{
+ out_be64(addr, bswap_64(val));
+}
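+
+/*
+ * Usage sketch (hypothetical MMIO register, for illustration):
+ *
+ *   uint64_t *reg = (uint64_t *)(regs_base + 0x120);
+ *   uint64_t val = in_be64(reg);    <- sync() then ldcix
+ *   out_be64(reg, val | 1);         <- sync() then stdcix
+ *
+ * The __in/__out variants skip the barrier; use them only when
+ * ordering is handled explicitly by the caller.
+ */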
+
+/* Helpers for the macros used to access PCI config space */
+#define in_le8 in_8
+#define out_le8 out_8
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __IO_H */
diff --git a/include/lock.h b/include/lock.h
new file mode 100644
index 0000000..f24e769
--- /dev/null
+++ b/include/lock.h
@@ -0,0 +1,83 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOCK_H
+#define __LOCK_H
+
+#include <stdbool.h>
+
+struct lock {
+ /* Lock value has bit 63 as lock bit and the PIR of the owner
+ * in the top 32-bit
+ */
+ unsigned long lock_val;
+
+ /*
+ * Set to true if lock is involved in the console flush path
+ * in which case taking it will suspend console flushing
+ */
+ bool in_con_path;
+};
+
+/* Initializer */
+#define LOCK_UNLOCKED { .lock_val = 0, .in_con_path = 0 }
+
+/* Note vs. libc and locking:
+ *
+ * The printf() family of functions use stack-based temporary
+ * buffers and call into the underlying skiboot read() and write(),
+ * which use a console lock.
+ *
+ * The underlying FSP console code will thus operate within that
+ * console lock.
+ *
+ * The libc does *NOT* lock stream buffer operations, so don't
+ * try to scanf() from the same FILE from two different processors.
+ *
+ * FSP operations are locked using an FSP lock, so all processors
+ * can safely call the FSP API.
+ *
+ * Note about ordering:
+ *
+ * lock() is a full memory barrier. unlock() is a lwsync.
+ */
+
+extern bool bust_locks;
+
+static inline void init_lock(struct lock *l)
+{
+ l->lock_val = 0;
+ l->in_con_path = false;
+}
+
+extern bool __try_lock(struct lock *l);
+extern bool try_lock(struct lock *l);
+extern void lock(struct lock *l);
+extern void unlock(struct lock *l);
+
+/* The debug output can happen while holding the FSP lock, so we need
+ * some kind of recursive lock support here. I don't want all locks to
+ * be recursive though, thus the caller needs to explicitly call
+ * lock_recursive(), which returns false if the lock was already held
+ * by this cpu. If it returns true, then the caller shall release it
+ * when done.
+ */
+extern bool lock_recursive(struct lock *l);
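+
+/*
+ * Usage sketch:
+ *
+ *   static struct lock my_lock = LOCK_UNLOCKED;
+ *
+ *   lock(&my_lock);
+ *   ...critical section...
+ *   unlock(&my_lock);
+ *
+ * and on a path that may already hold the lock:
+ *
+ *   bool taken = lock_recursive(&my_lock);
+ *   ...
+ *   if (taken)
+ *           unlock(&my_lock);
+ */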
+
+/* Called after per-cpu data structures are available */
+extern void init_locks(void);
+
+#endif /* __LOCK_H */
diff --git a/include/lpc.h b/include/lpc.h
new file mode 100644
index 0000000..47d1037
--- /dev/null
+++ b/include/lpc.h
@@ -0,0 +1,95 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LPC_H
+#define __LPC_H
+
+#include <opal.h>
+#include <ccan/endian/endian.h>
+
+/* Routines for accessing the LPC bus on Power8 */
+
+extern void lpc_init(void);
+
+/* Check for a default bus */
+extern bool lpc_present(void);
+
+/* Default bus accessors */
+extern int64_t lpc_write(enum OpalLPCAddressType addr_type, uint32_t addr,
+ uint32_t data, uint32_t sz);
+extern int64_t lpc_read(enum OpalLPCAddressType addr_type, uint32_t addr,
+ uint32_t *data, uint32_t sz);
+
+/* Mark LPC bus as used by console */
+extern void lpc_used_by_console(void);
+
+/*
+ * Simplified big endian FW accessors
+ */
+static inline int64_t lpc_fw_read32(uint32_t *val, uint32_t addr)
+{
+ return lpc_read(OPAL_LPC_FW, addr, val, 4);
+}
+
+static inline int64_t lpc_fw_write32(uint32_t val, uint32_t addr)
+{
+ return lpc_write(OPAL_LPC_FW, addr, cpu_to_be32(val), 4);
+}
+
+
+/*
+ * Simplified Little Endian IO space accessors
+ *
+ * Note: We do *NOT* handle unaligned accesses
+ */
+
+static inline void lpc_outb(uint8_t data, uint32_t addr)
+{
+ lpc_write(OPAL_LPC_IO, addr, data, 1);
+}
+
+static inline uint8_t lpc_inb(uint32_t addr)
+{
+ uint32_t d32;
+ int64_t rc = lpc_read(OPAL_LPC_IO, addr, &d32, 1);
+ return (rc == OPAL_SUCCESS) ? d32 : 0xff;
+}
+
+static inline void lpc_outw(uint16_t data, uint32_t addr)
+{
+ lpc_write(OPAL_LPC_IO, addr, cpu_to_le16(data), 2);
+}
+
+static inline uint16_t lpc_inw(uint32_t addr)
+{
+ uint32_t d32;
+ int64_t rc = lpc_read(OPAL_LPC_IO, addr, &d32, 2);
+ return (rc == OPAL_SUCCESS) ? le16_to_cpu(d32) : 0xffff;
+}
+
+static inline void lpc_outl(uint32_t data, uint32_t addr)
+{
+ lpc_write(OPAL_LPC_IO, addr, cpu_to_le32(data), 4);
+}
+
+static inline uint32_t lpc_inl(uint32_t addr)
+{
+ uint32_t d32;
+ int64_t rc = lpc_read(OPAL_LPC_IO, addr, &d32, 4);
+ return (rc == OPAL_SUCCESS) ? le32_to_cpu(d32) : 0xffffffff;
+}
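+
+/*
+ * Example (hypothetical UART on the LPC bus at legacy I/O port
+ * 0x3f8, for illustration only):
+ *
+ *   lpc_outb('A', 0x3f8);               <- transmit holding reg
+ *   uint8_t lsr = lpc_inb(0x3f8 + 5);   <- line status reg
+ */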
+
+#endif /* __LPC_H */
diff --git a/include/mem-map.h b/include/mem-map.h
new file mode 100644
index 0000000..effd689
--- /dev/null
+++ b/include/mem-map.h
@@ -0,0 +1,114 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEM_MAP_H
+#define __MEM_MAP_H
+
+/* This is our main offset for relocation. All our buffers
+ * are offset from that and our code relocates itself to
+ * that location
+ */
+#define SKIBOOT_BASE 0x30000000
+
+/* Stack size set to 16K, some of it will be used for
+ * machine check (see stack.h)
+ */
+#define STACK_SHIFT 14
+#define STACK_SIZE (1 << STACK_SHIFT)
+
+/* The NACA and other stuff in head.S need to be at the start: we
+ * give it 64k before placing the SPIRA and related data.
+ */
+#define SPIRA_OFF 0x00010000
+
+/* SPIRA is 2k; it is followed by the proc_init_data (aka PROCIN).
+ * These need to be at fixed addresses in case we're ever little
+ * endian: the linker can't endian-reverse a pointer for us. Text,
+ * data et al. follow this.
+ */
+#define PROCIN_OFF (SPIRA_OFF + 0x800)
+
+/* Initial MDST table. Like PROCIN, it needs a fixed address;
+ * we leave a 2k gap for PROCIN.
+ */
+#define MDST_TABLE_OFF (SPIRA_OFF + 0x1000)
+
+/* We keep a gap of 2M for skiboot text & bss for now. We then
+ * have our heap, which goes up to base + 14M (so 12M for now,
+ * though we can certainly reduce that a lot).
+ *
+ * Ideally, we should fix the heap end and use _end to basically
+ * initialize our heap so that it covers anything from _end to
+ * that heap end, avoiding wasted space.
+ *
+ * That's made a bit tricky, however, by how we create those
+ * regions statically in mem_region.c, but it's still on the list
+ * of things to improve.
+ *
+ * As of this writing (2014/4/6), we use approx. 512K for the
+ * skiboot core and 2M of heap on a 1-socket machine.
+ */
+#define HEAP_BASE (SKIBOOT_BASE + 0x00200000)
+#define HEAP_SIZE 0x00c00000
+
+/* This is our PSI TCE table. It's 16K entries on P7 and 256K
+ * entries on P8
+ */
+#define PSI_TCE_TABLE_BASE (SKIBOOT_BASE + 0x00e00000)
+#define PSI_TCE_TABLE_SIZE_P7 0x00020000UL
+#define PSI_TCE_TABLE_SIZE_P8 0x00200000UL
+
+/* This is the location of our console buffer at base + 16M */
+#define INMEM_CON_START (SKIBOOT_BASE + 0x01000000)
+#define INMEM_CON_LEN 0x100000
+
+/* This is the location of HBRT console buffer at base + 17M */
+#define HBRT_CON_START (SKIBOOT_BASE + 0x01100000)
+#define HBRT_CON_LEN 0x100000
+
+/* Tell FSP to put the init data at base + 20M, allocate 8M */
+#define SPIRA_HEAP_BASE (SKIBOOT_BASE + 0x01200000)
+#define SPIRA_HEAP_SIZE 0x00800000
+
+/* Total size of the above area
+ *
+ * (Ensure this has at least a 64k alignment)
+ */
+#define SKIBOOT_SIZE 0x01a00000
+
+/* We start laying out the CPU stacks from here, indexed by PIR
+ * each stack is STACK_SIZE in size (naturally aligned power of
+ * two) and the bottom of the stack contains the cpu thread
+ * structure for the processor, so it can be obtained by a simple
+ * bit mask from the stack pointer.
+ *
+ * The size of this array is dynamically determined at boot time
+ */
+#define CPU_STACKS_BASE (SKIBOOT_BASE + SKIBOOT_SIZE)
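+
+/* Sketch of the mask trick described above (illustrative):
+ *
+ *   struct cpu_thread *me =
+ *           (struct cpu_thread *)(sp & ~(STACK_SIZE - 1));
+ *
+ * where sp is the current stack pointer.
+ */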
+
+/*
+ * Address at which we load the kernel LID. This is also where
+ * we expect a passed-in kernel if booting without FSP and
+ * without a built-in kernel.
+ */
+#define KERNEL_LOAD_BASE ((void *)0x20000000)
+#define KERNEL_LOAD_SIZE 0x10000000
+
+/* Size allocated to build the device-tree */
+#define DEVICE_TREE_MAX_SIZE 0x80000
+
+
+#endif /* __MEM_MAP_H */
diff --git a/include/mem_region-malloc.h b/include/mem_region-malloc.h
new file mode 100644
index 0000000..9c772d2
--- /dev/null
+++ b/include/mem_region-malloc.h
@@ -0,0 +1,41 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEM_REGION_MALLOC_H
+#define __MEM_REGION_MALLOC_H
+
+#include <stddef.h>
+
+#define __loc2(line) #line
+#define __loc(line) __loc2(line)
+#define __location__ __FILE__ ":" __loc(__LINE__)
+
+void *__malloc(size_t size, const char *location);
+void *__zalloc(size_t size, const char *location);
+void *__realloc(void *ptr, size_t size, const char *location);
+void __free(void *ptr, const char *location);
+void *__memalign(size_t boundary, size_t size, const char *location);
+
+#define malloc(size) __malloc(size, __location__)
+#define zalloc(size) __zalloc(size, __location__)
+#define realloc(ptr, size) __realloc(ptr, size, __location__)
+#define free(ptr) __free(ptr, __location__)
+#define memalign(boundary, size) __memalign(boundary, size, __location__)
+
+void *__local_alloc(unsigned int chip, size_t size, size_t align,
+ const char *location);
+#define local_alloc(chip_id, size, align) \
+ __local_alloc((chip_id), (size), (align), __location__)
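+
+/*
+ * Illustrative expansion: with the wrappers above, a call such as
+ * malloc(16) on line 42 of foo.c becomes
+ *
+ *   __malloc(16, "foo.c:42")
+ *
+ * so every allocation is tagged with its call site.
+ */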
+
+#endif /* __MEM_REGION_MALLOC_H */
diff --git a/include/mem_region.h b/include/mem_region.h
new file mode 100644
index 0000000..b2547e5
--- /dev/null
+++ b/include/mem_region.h
@@ -0,0 +1,69 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEMORY_REGION
+#define __MEMORY_REGION
+#include <ccan/list/list.h>
+#include <stdint.h>
+
+enum mem_region_type {
+ /* ranges allocatable by mem_alloc: this will be most of memory */
+ REGION_SKIBOOT_HEAP,
+
+ /* ranges used explicitly for skiboot, but not allocatable. eg .text */
+ REGION_SKIBOOT_FIRMWARE,
+
+ /* ranges reserved, possibly before skiboot init, eg HW framebuffer */
+ REGION_RESERVED,
+
+ /* ranges available for the OS, created by mem_region_release_unused */
+ REGION_OS,
+};
+
+/* An area of physical memory. */
+struct mem_region {
+ struct list_node list;
+ const char *name;
+ uint64_t start, len;
+ struct dt_node *mem_node;
+ enum mem_region_type type;
+ struct list_head free_list;
+};
+
+extern struct lock mem_region_lock;
+void *mem_alloc(struct mem_region *region, size_t size, size_t align,
+ const char *location);
+void mem_free(struct mem_region *region, void *mem,
+ const char *location);
+bool mem_resize(struct mem_region *region, void *mem, size_t len,
+ const char *location);
+size_t mem_size(const struct mem_region *region, const void *ptr);
+bool mem_check(const struct mem_region *region);
+void mem_region_release_unused(void);
+
+/* Specifically for working on the heap. */
+extern struct mem_region skiboot_heap;
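+
+/*
+ * Allocation sketch (illustrative; shown on the assumption that
+ * callers serialize on mem_region_lock):
+ *
+ *   lock(&mem_region_lock);
+ *   void *p = mem_alloc(&skiboot_heap, size, 16, __location__);
+ *   unlock(&mem_region_lock);
+ */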
+
+void mem_region_init(void);
+
+void mem_region_add_dt_reserved(void);
+
+/* Mark memory as reserved */
+void mem_reserve(const char *name, uint64_t start, uint64_t len);
+
+struct mem_region *find_mem_region(const char *name);
+
+#endif /* __MEMORY_REGION */
diff --git a/include/memory.h b/include/memory.h
new file mode 100644
index 0000000..ce0b22b
--- /dev/null
+++ b/include/memory.h
@@ -0,0 +1,23 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEMORY_H
+#define __MEMORY_H
+
+/* This populates the memory device-tree nodes. */
+extern void memory_parse(void);
+
+#endif /* __MEMORY_H */
diff --git a/include/nx.h b/include/nx.h
new file mode 100644
index 0000000..ef02ed5
--- /dev/null
+++ b/include/nx.h
@@ -0,0 +1,22 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NX_H
+#define __NX_H
+
+extern void nx_init(void);
+
+#endif /* __NX_H */
diff --git a/include/op-panel.h b/include/op-panel.h
new file mode 100644
index 0000000..dfb4e11
--- /dev/null
+++ b/include/op-panel.h
@@ -0,0 +1,67 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OP_PANEL_H
+#define __OP_PANEL_H
+
+#include <stdint.h>
+
+/* Severity */
+enum op_severity {
+ OP_LOG = 0x4342, /* 'CB' - Progress info */
+ OP_WARN = 0x4542, /* 'EB' - Information condition */
+ OP_ERROR = 0x4442, /* 'DB' - Non fatal error */
+ OP_FATAL = 0x4242, /* 'BB' - Fatal error */
+};
+
+/* Module */
+enum op_module {
+ OP_MOD_CORE = 0x3030, /* '00' - Anything really */
+ OP_MOD_INIT = 0x3031, /* '01' - init */
+ OP_MOD_LOCK = 0x3032, /* '02' - spinlocks */
+ OP_MOD_FSP = 0x3033, /* '03' - FSP */
+ OP_MOD_FSPCON = 0x3034, /* '04' - FSPCON */
+ OP_MOD_CHIPTOD = 0x3035, /* '05' - ChipTOD */
+ OP_MOD_CPU = 0x3036, /* '06' - CPU bringup */
+ OP_MOD_MEM = 0x3037, /* '07' - Memory */
+ OP_MOD_XSCOM = 0x3038, /* '08' - XSCOM */
+};
+
+/* Common codes:
+ *
+ * 'BA010001' : Failed to load a kernel
+ * 'BA010002' : Failed to create a device-tree
+ * 'BA020000' : Locking already owned lock
+ * 'BA020001' : Unlocking unlocked lock
+ * 'BA020002' : Unlocking not-owned lock
+ * 'BA006666' : Abort
+ * 'BA050000' : Failed ChipTOD init/sync
+ * 'BA050001' : Failed to find a CPU on the master chip
+ * 'BA050002' : Master chip sync failed
+ * 'EA05xxx2' : Slave sync failed (xxx = PIR)
+ * 'BA070000' : Cannot find MS VPD or invalid
+ * 'BA070001' : MS VPD wrong size
+ * 'BA070002' : MS VPD doesn't have an MSAC
+ * 'BA070003' : MS VPD doesn't have a total config
+ */
+
+extern void op_display(enum op_severity, enum op_module, uint16_t code);
+
+extern void op_panel_disable_src_echo(void);
+extern void op_panel_clear_src(void);
+extern void fsp_oppanel_init(void);
+
+#endif /* __OP_PANEL_H */
diff --git a/include/opal-internal.h b/include/opal-internal.h
new file mode 100644
index 0000000..0330b28
--- /dev/null
+++ b/include/opal-internal.h
@@ -0,0 +1,234 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file opal-internal.h
+ * @brief OPAL related internal definitions
+ *
+ */
+
+#ifndef __OPAL_INTERNAL_H
+#define __OPAL_INTERNAL_H
+
+#include <skiboot.h>
+
+struct opal_table_entry {
+ void *func;
+ uint32_t token;
+ uint32_t nargs;
+};
+
+#define opal_call(__tok, __func, __nargs) \
+static struct opal_table_entry __e_##__func __used __section(".opal_table") = \
+{ .func = __func, .token = __tok, \
+ .nargs = __nargs + 0 * sizeof(__func( __test_args##__nargs )) }
+
+/* Make sure function takes args they claim. Look away now... */
+#define __test_args0
+#define __test_args1 0
+#define __test_args2 0,0
+#define __test_args3 0,0,0
+#define __test_args4 0,0,0,0
+#define __test_args5 0,0,0,0,0
+#define __test_args6 0,0,0,0,0,0
+#define __test_args7 0,0,0,0,0,0,0
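+
+/*
+ * Usage sketch (hypothetical handler and token, for illustration):
+ *
+ *   static int64_t my_handler(uint64_t a, uint64_t b);
+ *   opal_call(OPAL_SOME_TOKEN, my_handler, 2);
+ *
+ * The 0 * sizeof(__func(__test_args2)) term evaluates to zero but
+ * fails to compile if my_handler doesn't take exactly 2 arguments.
+ */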
+
+extern struct opal_table_entry __opal_table_start[];
+extern struct opal_table_entry __opal_table_end[];
+
+extern uint64_t opal_pending_events;
+
+extern struct dt_node *opal_node;
+
+extern void opal_table_init(void);
+extern void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values);
+extern void add_opal_node(void);
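+
+/*
+ * Example (sketch): setting and later clearing a pending event bit:
+ *
+ *   opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT,
+ *                           OPAL_EVENT_CONSOLE_OUTPUT);
+ *   ...
+ *   opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT, 0);
+ */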
+
+#define opal_register(token, func, nargs) \
+ __opal_register((token) + 0*sizeof(func(__test_args##nargs)), \
+ (func), (nargs))
+extern void __opal_register(uint64_t token, void *func, unsigned num_args);
+
+/** @defgroup POLLER Poller
+ * Warning: no locking at the moment, do at init time only
+ * XXX TODO: Add the big RCU-ish "opal API lock" to protect us here
+ * which will also be used for other things such as runtime updates
+ * @ingroup OPAL_INTERNAL POLLER
+ * @{ */
+/** Register a poller to be called regularly from the OPAL polling loop */
+extern void opal_add_poller(void (*poller)(void *data), void *data);
+/** Remove a previously registered poller */
+extern void opal_del_poller(void (*poller)(void *data));
+/** @} */
+
+/** @defgroup NOTIFIER Host Sync Notifier
+ * Warning: no locking, only call that from the init processor
+ * @ingroup OPAL_INTERNAL NOTIFIER
+ * @{ */
+/** Register a notifier called when synchronizing with the host at reboot */
+extern void opal_add_host_sync_notifier(bool (*notify)(void *data), void *data);
+/** Remove a previously registered host sync notifier */
+extern void opal_del_host_sync_notifier(bool (*notify)(void *data));
+/** @} */
+
+
+/** @ingroup OPAL_INTERNAL
+ * @defgroup ERR_TYPE Classification of error/events type reported on OPAL
+ * OPAL error/event type classification
+ * @ingroup OPAL_INTERNAL ERR_TYPE
+ * @{ */
+/** Platform Events/Errors: Report Machine Check Interrupt */
+#define OPAL_PLATFORM_ERR_EVT 0x01
+/** INPUT_OUTPUT: Report all I/O related events/errors */
+#define OPAL_INPUT_OUTPUT_ERR_EVT 0x02
+/** RESOURCE_DEALLOC: Hotplug events and errors */
+#define OPAL_RESOURCE_DEALLOC_ERR_EVT 0x03
+/** MISC: Miscellaneous error */
+#define OPAL_MISC_ERR_EVT 0x04
+/** @} */
+
+/**
+ * @ingroup OPAL_INTERNAL
+ * @defgroup ERR_ID OPAL Subsystem IDs listed for reporting events/errors
+ * @ingroup ERR_ID OPAL_INTERNAL
+ * @{ */
+#define OPAL_PROCESSOR_SUBSYSTEM 0x10
+#define OPAL_MEMORY_SUBSYSTEM 0x20
+#define OPAL_IO_SUBSYSTEM 0x30
+#define OPAL_IO_DEVICES 0x40
+#define OPAL_CEC_HARDWARE 0x50
+#define OPAL_POWER_COOLING 0x60
+#define OPAL_MISC_SUBSYSTEM 0x70
+#define OPAL_SURVEILLANCE_ERR 0x7A
+#define OPAL_PLATFORM_FIRMWARE 0x80
+#define OPAL_SOFTWARE 0x90
+#define OPAL_EXTERNAL_ENV 0xA0
+/** @} */
+
+/**
+ * @ingroup OPAL_INTERNAL
+ * @defgroup ERR_SEV OPAL Error Severity
+ * During reporting an event/error the following represents how
+ * serious the logged event/error is. (Severity)
+ * @ingroup OPAL_INTERNAL ERR_SEV
+ * @{ */
+#define OPAL_INFO 0x00
+#define OPAL_RECOVERED_ERR_GENERAL 0x10
+/** @} */
+
+/**
+ * @ingroup ERR_SEV OPAL_INTERNAL
+ * @defgroup ERR_SEV_2 Predictive Error defines
+ * @ingroup ERR_SEV_2 ERR_SEV OPAL_INTERNAL
+ * @{ */
+/** 0x20 Generic predictive error */
+#define OPAL_PREDICTIVE_ERR_GENERAL 0x20
+/** 0x21 Predictive error, degraded performance */
+#define OPAL_PREDICTIVE_ERR_DEGRADED_PERF 0x21
+/** 0x22 Predictive error, fault may be corrected after reboot */
+#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT 0x22
+/**
+ * 0x23 Predictive error, fault may be corrected after reboot,
+ * degraded performance
+ */
+#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_BOOT_DEGRADE_PERF 0x23
+/** 0x24 Predictive error, loss of redundancy */
+#define OPAL_PREDICTIVE_ERR_LOSS_OF_REDUNDANCY 0x24
+/** @} */
+
+/** @ingroup ERR_SEV OPAL_INTERNAL
+ * @defgroup ERR_SEV_4 Unrecoverable Error defines
+ * @ingroup ERR_SEV_4 ERR_SEV OPAL_INTERNAL
+ * @{ */
+/** 0x40 Generic Unrecoverable error */
+#define OPAL_UNRECOVERABLE_ERR_GENERAL 0x40
+/** 0x41 Unrecoverable error bypassed with degraded performance */
+#define OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF 0x41
+/** 0x44 Unrecoverable error bypassed with loss of redundancy */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY 0x44
+/** 0x45 Unrecoverable error bypassed with loss of redundancy and performance */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY_PERF 0x45
+/** 0x48 Unrecoverable error bypassed with loss of function */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_OF_FUNCTION 0x48
+/** 0x50 In case of PANIC */
+#define OPAL_ERROR_PANIC 0x50
+/** @} */
+
+/**
+ * @ingroup OPAL_INTERNAL
+ * @defgroup OPAL_EVENT_SUB_TYPE Event Sub-Type
+ * This field provides additional information on the non-error
+ * event type
+ * @ingroup OPAL_EVENT_SUB_TYPE OPAL_INTERNAL
+ * @{ */
+#define OPAL_NA 0x00
+#define OPAL_MISCELLANEOUS_INFO_ONLY 0x01
+#define OPAL_PREV_REPORTED_ERR_RECTIFIED 0x10
+#define OPAL_SYS_RESOURCES_DECONFIG_BY_USER 0x20
+#define OPAL_SYS_RESOURCE_DECONFIG_PRIOR_ERR 0x21
+#define OPAL_RESOURCE_DEALLOC_EVENT_NOTIFY 0x22
+#define OPAL_CONCURRENT_MAINTENANCE_EVENT 0x40
+#define OPAL_CAPACITY_UPGRADE_EVENT 0x60
+#define OPAL_RESOURCE_SPARING_EVENT 0x70
+#define OPAL_DYNAMIC_RECONFIG_EVENT 0x80
+#define OPAL_NORMAL_SYS_PLATFORM_SHUTDOWN 0xD0
+#define OPAL_ABNORMAL_POWER_OFF 0xE0
+/** @} */
+
+/** @ingroup OPAL_INTERNAL
+ * Max user dump size is 14K */
+#define OPAL_LOG_MAX_DUMP 14336
+
+/**
+ * @struct opal_user_data_section
+ * @ingroup OPAL_INTERNAL
+ * Multiple user data sections
+ */
+struct opal_user_data_section {
+ uint32_t tag;
+ uint16_t size;
+ uint16_t component_id;
+ char data_dump[1];
+} __attribute__((__packed__));
+
+/**
+ * @struct opal_errorlog
+ * @ingroup OPAL_INTERNAL
+ * All the information regarding an error/event to be reported
+ * needs to populate this structure using pre-defined interfaces
+ * only
+ */
+struct opal_errorlog {
+
+ uint16_t component_id;
+ uint8_t error_event_type;
+ uint8_t subsystem_id;
+
+ uint8_t event_severity;
+ uint8_t event_subtype;
+ uint8_t user_section_count;
+ uint8_t elog_origin;
+
+ uint32_t user_section_size;
+ uint32_t reason_code;
+ uint32_t additional_info[4];
+
+ char user_data_dump[OPAL_LOG_MAX_DUMP];
+ struct list_node link;
+} __attribute__((__packed__));
+
+#endif /* __OPAL_INTERNAL_H */
diff --git a/include/opal-msg.h b/include/opal-msg.h
new file mode 100644
index 0000000..2be7832
--- /dev/null
+++ b/include/opal-msg.h
@@ -0,0 +1,36 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __OPALMSG_H
+#define __OPALMSG_H
+
+#include <opal.h>
+
+#define OPAL_MAX_ASYNC_COMP 2
+
+int _opal_queue_msg(enum OpalMessageType msg_type, void *data,
+ void (*consumed)(void *data), size_t num_params,
+ const u64 *params);
+
+#define opal_queue_msg(msg_type, data, cb, ...) \
+ _opal_queue_msg(msg_type, data, cb, \
+ sizeof((u64[]) {__VA_ARGS__})/sizeof(u64), \
+ (u64[]) {__VA_ARGS__})
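+
+/*
+ * Usage sketch: queue an async completion carrying a token and a
+ * return code; the macro counts the u64 varargs for num_params:
+ *
+ *   opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
+ *                  (u64)token, (u64)rc);
+ */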
+
+void opal_init_msg(void);
+
+#endif /* __OPALMSG_H */
diff --git a/include/opal.h b/include/opal.h
new file mode 100644
index 0000000..0b80bb4
--- /dev/null
+++ b/include/opal.h
@@ -0,0 +1,912 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OPAL_H
+#define __OPAL_H
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS 0
+#define OPAL_PARAMETER -1
+#define OPAL_BUSY -2
+#define OPAL_PARTIAL -3
+#define OPAL_CONSTRAINED -4
+#define OPAL_CLOSED -5
+#define OPAL_HARDWARE -6
+#define OPAL_UNSUPPORTED -7
+#define OPAL_PERMISSION -8
+#define OPAL_NO_MEM -9
+#define OPAL_RESOURCE -10
+#define OPAL_INTERNAL_ERROR -11
+#define OPAL_BUSY_EVENT -12
+#define OPAL_HARDWARE_FROZEN -13
+#define OPAL_WRONG_STATE -14
+#define OPAL_ASYNC_COMPLETION -15
+
+/* API Tokens (in r0) */
+#define OPAL_TEST 0
+#define OPAL_CONSOLE_WRITE 1
+#define OPAL_CONSOLE_READ 2
+#define OPAL_RTC_READ 3
+#define OPAL_RTC_WRITE 4
+#define OPAL_CEC_POWER_DOWN 5
+#define OPAL_CEC_REBOOT 6
+#define OPAL_READ_NVRAM 7
+#define OPAL_WRITE_NVRAM 8
+#define OPAL_HANDLE_INTERRUPT 9
+#define OPAL_POLL_EVENTS 10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
+#define OPAL_PCI_CONFIG_READ_BYTE 13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
+#define OPAL_PCI_CONFIG_READ_WORD 15
+#define OPAL_PCI_CONFIG_WRITE_BYTE 16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
+#define OPAL_PCI_CONFIG_WRITE_WORD 18
+#define OPAL_SET_XIVE 19
+#define OPAL_GET_XIVE 20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
+#define OPAL_PCI_EEH_FREEZE_STATUS 23
+#define OPAL_PCI_SHPC 24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
+#define OPAL_PCI_EEH_FREEZE_CLEAR 26
+#define OPAL_PCI_PHB_MMIO_ENABLE 27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
+#define OPAL_PCI_SET_PE 31
+#define OPAL_PCI_SET_PELTV 32
+#define OPAL_PCI_SET_MVE 33
+#define OPAL_PCI_SET_MVE_ENABLE 34
+#define OPAL_PCI_GET_XIVE_REISSUE 35
+#define OPAL_PCI_SET_XIVE_REISSUE 36
+#define OPAL_PCI_SET_XIVE_PE 37
+#define OPAL_GET_XIVE_SOURCE 38
+#define OPAL_GET_MSI_32 39
+#define OPAL_GET_MSI_64 40
+#define OPAL_START_CPU 41
+#define OPAL_QUERY_CPU_STATUS 42
+#define OPAL_WRITE_OPPANEL 43 /* unimplemented */
+#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
+#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_RESERVED1 58
+#define OPAL_RESERVED2 59
+#define OPAL_PCI_NEXT_ERROR 60
+#define OPAL_PCI_EEH_FREEZE_STATUS2 61
+#define OPAL_PCI_POLL 62
+#define OPAL_PCI_MSI_EOI 63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
+#define OPAL_REINIT_CPUS 70
+#define OPAL_ELOG_READ 71
+#define OPAL_ELOG_WRITE 72
+#define OPAL_ELOG_ACK 73
+#define OPAL_ELOG_RESEND 74
+#define OPAL_ELOG_SIZE 75
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
+#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_CHECK_TOKEN 80
+#define OPAL_DUMP_INIT 81
+#define OPAL_DUMP_INFO 82
+#define OPAL_DUMP_READ 83
+#define OPAL_DUMP_ACK 84
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
+#define OPAL_SENSOR_READ 88
+#define OPAL_GET_PARAM 89
+#define OPAL_SET_PARAM 90
+#define OPAL_DUMP_RESEND 91
+#define OPAL_ELOG_SEND 92
+#define OPAL_PCI_SET_PHB_CAPI_MODE 93
+#define OPAL_DUMP_INFO2 94
+#define OPAL_WRITE_OPPANEL_ASYNC 95
+#define OPAL_LAST 95
+
+#ifndef __ASSEMBLY__
+
+#include <compiler.h>
+#include <stdint.h>
+
+/* Other enums */
+
+enum OpalVendorApiTokens {
+ OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
+};
+
+enum OpalFreezeState {
+ OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+ OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+ OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+ OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+ OPAL_EEH_STOPPED_RESET = 4,
+ OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+ OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+enum OpalEehFreezeActionToken {
+ OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3
+};
+
+enum OpalPciStatusToken {
+ OPAL_EEH_NO_ERROR = 0,
+ OPAL_EEH_IOC_ERROR = 1,
+ OPAL_EEH_PHB_ERROR = 2,
+ OPAL_EEH_PE_ERROR = 3,
+ OPAL_EEH_PE_MMIO_ERROR = 4,
+ OPAL_EEH_PE_DMA_ERROR = 5
+};
+
+enum OpalPciErrorSeverity {
+ OPAL_EEH_SEV_NO_ERROR = 0,
+ OPAL_EEH_SEV_IOC_DEAD = 1,
+ OPAL_EEH_SEV_PHB_DEAD = 2,
+ OPAL_EEH_SEV_PHB_FENCED = 3,
+ OPAL_EEH_SEV_PE_ER = 4,
+ OPAL_EEH_SEV_INF = 5
+};
+
+enum OpalShpcAction {
+ OPAL_SHPC_GET_LINK_STATE = 0,
+ OPAL_SHPC_GET_SLOT_STATE = 1
+};
+
+enum OpalShpcLinkState {
+ OPAL_SHPC_LINK_DOWN = 0,
+ OPAL_SHPC_LINK_UP_x1 = 1,
+ OPAL_SHPC_LINK_UP_x2 = 2,
+ OPAL_SHPC_LINK_UP_x4 = 4,
+ OPAL_SHPC_LINK_UP_x8 = 8,
+ OPAL_SHPC_LINK_UP_x16 = 16,
+ OPAL_SHPC_LINK_UP_x32 = 32
+};
+enum OpalMmioWindowType {
+ OPAL_M32_WINDOW_TYPE = 1,
+ OPAL_M64_WINDOW_TYPE = 2,
+ OPAL_IO_WINDOW_TYPE = 3
+};
+enum OpalShpcSlotState {
+ OPAL_SHPC_DEV_NOT_PRESENT = 0,
+ OPAL_SHPC_DEV_PRESENT = 1
+};
+enum OpalShpcPowerState {
+ OPAL_SHPC_POWER_OFF = 0,
+ OPAL_SHPC_POWER_ON = 1
+};
+enum OpalExceptionHandler {
+ OPAL_MACHINE_CHECK_HANDLER = 1,
+ OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+ OPAL_SOFTPATCH_HANDLER = 3
+};
+enum OpalPendingState {
+ OPAL_EVENT_OPAL_INTERNAL = 0x1,
+ OPAL_EVENT_NVRAM = 0x2,
+ OPAL_EVENT_RTC = 0x4,
+ OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100,
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_DUMP_AVAIL = 0x400,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+/* Classification of error/events type reported on OPAL */
+/* Platform Events/Errors: Report Machine Check Interrupt */
+#define OPAL_PLATFORM_ERR_EVT 0x01
+/* INPUT_OUTPUT: Report all I/O related events/errors */
+#define OPAL_INPUT_OUTPUT_ERR_EVT 0x02
+/* RESOURCE_DEALLOC: Hotplug events and errors */
+#define OPAL_RESOURCE_DEALLOC_ERR_EVT 0x03
+/* MISC: Miscellaneous error */
+#define OPAL_MISC_ERR_EVT 0x04
+
+/* OPAL Subsystem IDs listed for reporting events/errors */
+#define OPAL_PROCESSOR_SUBSYSTEM 0x10
+#define OPAL_MEMORY_SUBSYSTEM 0x20
+#define OPAL_IO_SUBSYSTEM 0x30
+#define OPAL_IO_DEVICES 0x40
+#define OPAL_CEC_HARDWARE 0x50
+#define OPAL_POWER_COOLING 0x60
+#define OPAL_MISC_SUBSYSTEM 0x70
+#define OPAL_SURVEILLANCE_ERR 0x7A
+#define OPAL_PLATFORM_FIRMWARE 0x80
+#define OPAL_SOFTWARE 0x90
+#define OPAL_EXTERNAL_ENV 0xA0
+
+/*
+ * During reporting an event/error the following represents
+ * how serious the logged event/error is. (Severity)
+ */
+#define OPAL_INFO 0x00
+#define OPAL_RECOVERED_ERR_GENERAL 0x10
+
+/* 0x2X series is to denote set of Predictive Error */
+/* 0x20 Generic predictive error */
+#define OPAL_PREDICTIVE_ERR_GENERAL 0x20
+/* 0x21 Predictive error, degraded performance */
+#define OPAL_PREDICTIVE_ERR_DEGRADED_PERF 0x21
+/* 0x22 Predictive error, fault may be corrected after reboot */
+#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT 0x22
+/*
+ * 0x23 Predictive error, fault may be corrected after reboot,
+ * degraded performance
+ */
+#define OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_BOOT_DEGRADE_PERF 0x23
+/* 0x24 Predictive error, loss of redundancy */
+#define OPAL_PREDICTIVE_ERR_LOSS_OF_REDUNDANCY 0x24
+
+/* 0x4X series for Unrecoverable Error */
+/* 0x40 Generic Unrecoverable error */
+#define OPAL_UNRECOVERABLE_ERR_GENERAL 0x40
+/* 0x41 Unrecoverable error bypassed with degraded performance */
+#define OPAL_UNRECOVERABLE_ERR_DEGRADE_PERF 0x41
+/* 0x44 Unrecoverable error bypassed with loss of redundancy */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY 0x44
+/* 0x45 Unrecoverable error bypassed with loss of redundancy and performance */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_REDUNDANCY_PERF 0x45
+/* 0x48 Unrecoverable error bypassed with loss of function */
+#define OPAL_UNRECOVERABLE_ERR_LOSS_OF_FUNCTION 0x48
+/* 0x50 In case of PANIC */
+#define OPAL_ERROR_PANIC 0x50
+
+/*
+ * OPAL Event Sub-type
+ * This field provides additional information on the non-error
+ * event type
+ */
+#define OPAL_NA 0x00
+#define OPAL_MISCELLANEOUS_INFO_ONLY 0x01
+#define OPAL_PREV_REPORTED_ERR_RECTIFIED 0x10
+#define OPAL_SYS_RESOURCES_DECONFIG_BY_USER 0x20
+#define OPAL_SYS_RESOURCE_DECONFIG_PRIOR_ERR 0x21
+#define OPAL_RESOURCE_DEALLOC_EVENT_NOTIFY 0x22
+#define OPAL_CONCURRENT_MAINTENANCE_EVENT 0x40
+#define OPAL_CAPACITY_UPGRADE_EVENT 0x60
+#define OPAL_RESOURCE_SPARING_EVENT 0x70
+#define OPAL_DYNAMIC_RECONFIG_EVENT 0x80
+#define OPAL_NORMAL_SYS_PLATFORM_SHUTDOWN 0xD0
+#define OPAL_ABNORMAL_POWER_OFF 0xE0
+
+/* Max user dump size is 14K */
+#define OPAL_LOG_MAX_DUMP 14336
+
+/* Multiple user data sections */
+struct __attribute__((__packed__)) opal_user_data_section {
+ uint32_t tag;
+ uint16_t size;
+ uint16_t component_id;
+ char data_dump[1];
+};
+
+/* Machine check related definitions */
+enum OpalMCE_Version {
+ OpalMCE_V1 = 1,
+};
+
+enum OpalMCE_Severity {
+ OpalMCE_SEV_NO_ERROR = 0,
+ OpalMCE_SEV_WARNING = 1,
+ OpalMCE_SEV_ERROR_SYNC = 2,
+ OpalMCE_SEV_FATAL = 3,
+};
+
+enum OpalMCE_Disposition {
+ OpalMCE_DISPOSITION_RECOVERED = 0,
+ OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalMCE_Initiator {
+ OpalMCE_INITIATOR_UNKNOWN = 0,
+ OpalMCE_INITIATOR_CPU = 1,
+};
+
+enum OpalMCE_ErrorType {
+ OpalMCE_ERROR_TYPE_UNKNOWN = 0,
+ OpalMCE_ERROR_TYPE_UE = 1,
+ OpalMCE_ERROR_TYPE_SLB = 2,
+ OpalMCE_ERROR_TYPE_ERAT = 3,
+ OpalMCE_ERROR_TYPE_TLB = 4,
+};
+
+enum OpalMCE_UeErrorType {
+ OpalMCE_UE_ERROR_INDETERMINATE = 0,
+ OpalMCE_UE_ERROR_IFETCH = 1,
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ OpalMCE_UE_ERROR_LOAD_STORE = 3,
+ OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
+};
+
+enum OpalMCE_SlbErrorType {
+ OpalMCE_SLB_ERROR_INDETERMINATE = 0,
+ OpalMCE_SLB_ERROR_PARITY = 1,
+ OpalMCE_SLB_ERROR_MULTIHIT = 2,
+};
+
+enum OpalMCE_EratErrorType {
+ OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
+ OpalMCE_ERAT_ERROR_PARITY = 1,
+ OpalMCE_ERAT_ERROR_MULTIHIT = 2,
+};
+
+enum OpalMCE_TlbErrorType {
+ OpalMCE_TLB_ERROR_INDETERMINATE = 0,
+ OpalMCE_TLB_ERROR_PARITY = 1,
+ OpalMCE_TLB_ERROR_MULTIHIT = 2,
+};
+
+enum OpalThreadStatus {
+ OPAL_THREAD_INACTIVE = 0x0,
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2
+};
+
+enum OpalPciBusCompare {
+ OpalPciBusAny = 0, /* Any bus number match */
+ OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
+ OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
+ OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
+ OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
+ OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
+ OpalPciBusAll = 7, /* Match bus number exactly */
+};
+
+enum OpalDeviceCompare {
+ OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+ OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+ OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+ OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+ OPAL_UNMAP_PE = 0,
+ OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
+enum OpalM64Action {
+ OPAL_DISABLE_M64 = 0,
+ OPAL_ENABLE_M64_SPLIT = 1,
+ OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
+enum OpalPciResetScope {
+ OPAL_RESET_PHB_COMPLETE = 1,
+ OPAL_RESET_PCI_LINK = 2,
+ OPAL_RESET_PHB_ERROR = 3,
+ OPAL_RESET_PCI_HOT = 4,
+ OPAL_RESET_PCI_FUNDAMENTAL = 5,
+ OPAL_RESET_PCI_IODA_TABLE = 6
+};
+
+enum OpalPciReinitScope {
+ /*
+ * Note: we chose values that do not overlap
+ * OpalPciResetScope as OPAL v2 used the same
+ * enum for both
+ */
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
+};
+
+enum OpalPciMaskAction {
+ OPAL_UNMASK_ERROR_TYPE = 0,
+ OPAL_MASK_ERROR_TYPE = 1
+};
+
+enum OpalSlotLedType {
+ OPAL_SLOT_LED_ID_TYPE = 0,
+ OPAL_SLOT_LED_FAULT_TYPE = 1
+};
+
+enum OpalLedAction {
+ OPAL_TURN_OFF_LED = 0,
+ OPAL_TURN_ON_LED = 1,
+ OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
+};
+
+enum OpalEpowStatus {
+ OPAL_EPOW_NONE = 0,
+ OPAL_EPOW_UPS = 1,
+ OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
+ OPAL_EPOW_OVER_INTERNAL_TEMP = 3
+};
+
+enum OpalCheckTokenStatus {
+ OPAL_TOKEN_ABSENT = 0,
+ OPAL_TOKEN_PRESENT = 1
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
+enum OpalMessageType {
+ OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
+ * additional params function-specific
+ */
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_TYPE_MAX,
+};
+
+struct opal_msg {
+ uint32_t msg_type;
+ uint32_t reserved;
+ uint64_t params[8];
+};
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+ OPAL_SYSPARAM_READ = 0x1,
+ OPAL_SYSPARAM_WRITE = 0x2,
+ OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+struct opal_machine_check_event {
+ enum OpalMCE_Version version:8; /* 0x00 */
+ uint8_t in_use; /* 0x01 */
+ enum OpalMCE_Severity severity:8; /* 0x02 */
+ enum OpalMCE_Initiator initiator:8; /* 0x03 */
+ enum OpalMCE_ErrorType error_type:8; /* 0x04 */
+ enum OpalMCE_Disposition disposition:8; /* 0x05 */
+ uint8_t reserved_1[2]; /* 0x06 */
+ uint64_t gpr3; /* 0x08 */
+ uint64_t srr0; /* 0x10 */
+ uint64_t srr1; /* 0x18 */
+ union { /* 0x20 */
+ struct {
+ enum OpalMCE_UeErrorType ue_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t physical_address_provided;
+ uint8_t reserved_1[5];
+ uint64_t effective_address;
+ uint64_t physical_address;
+ uint8_t reserved_2[8];
+ } ue_error;
+
+ struct {
+ enum OpalMCE_SlbErrorType slb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } slb_error;
+
+ struct {
+ enum OpalMCE_EratErrorType erat_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } erat_error;
+
+ struct {
+ enum OpalMCE_TlbErrorType tlb_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } tlb_error;
+ } u;
+};
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+/* OpalMemoryErrorData->flags */
+#define OPAL_MEM_CORRECTED_ERROR 0x0001
+#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
+#define OPAL_MEM_ACK_REQUIRED 0x8000
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ uint16_t flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ uint64_t physical_address_start;
+ uint64_t physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
+enum {
+ OPAL_P7IOC_DIAG_TYPE_NONE = 0,
+ OPAL_P7IOC_DIAG_TYPE_RGC = 1,
+ OPAL_P7IOC_DIAG_TYPE_BI = 2,
+ OPAL_P7IOC_DIAG_TYPE_CI = 3,
+ OPAL_P7IOC_DIAG_TYPE_MISC = 4,
+ OPAL_P7IOC_DIAG_TYPE_I2C = 5,
+ OPAL_P7IOC_DIAG_TYPE_LAST = 6
+};
+
+struct OpalIoP7IOCErrorData {
+ uint16_t type;
+
+ /* GEM */
+ uint64_t gemXfir;
+ uint64_t gemRfir;
+ uint64_t gemRirqfir;
+ uint64_t gemMask;
+ uint64_t gemRwof;
+
+ /* LEM */
+ uint64_t lemFir;
+ uint64_t lemErrMask;
+ uint64_t lemAction0;
+ uint64_t lemAction1;
+ uint64_t lemWof;
+
+ union {
+ struct OpalIoP7IOCRgcErrorData {
+ uint64_t rgcStatus; /* 3E1C10 */
+ uint64_t rgcLdcp; /* 3E1C18 */
+ }rgc;
+ struct OpalIoP7IOCBiErrorData {
+ uint64_t biLdcp0; /* 3C0100, 3C0118 */
+ uint64_t biLdcp1; /* 3C0108, 3C0120 */
+ uint64_t biLdcp2; /* 3C0110, 3C0128 */
+ uint64_t biFenceStatus; /* 3C0130, 3C0130 */
+
+ uint8_t biDownbound; /* BI Downbound or Upbound */
+ }bi;
+ struct OpalIoP7IOCCiErrorData {
+ uint64_t ciPortStatus; /* 3Dn008 */
+ uint64_t ciPortLdcp; /* 3Dn010 */
+
+ uint8_t ciPort; /* Index of CI port: 0/1 */
+ }ci;
+ };
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+ OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+};
+
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256
+};
+
+struct OpalIoPhbErrorCommon {
+ uint32_t version;
+ uint32_t ioType;
+ uint32_t len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ uint32_t brdgCtl;
+
+ // P7IOC utl regs
+ uint32_t portStatusReg;
+ uint32_t rootCmplxStatus;
+ uint32_t busAgentStatus;
+
+ // P7IOC cfg regs
+ uint32_t deviceStatus;
+ uint32_t slotStatus;
+ uint32_t linkStatus;
+ uint32_t devCmdStatus;
+ uint32_t devSecStatus;
+
+ // cfg AER regs
+ uint32_t rootErrorStatus;
+ uint32_t uncorrErrorStatus;
+ uint32_t corrErrorStatus;
+ uint32_t tlpHdr1;
+ uint32_t tlpHdr2;
+ uint32_t tlpHdr3;
+ uint32_t tlpHdr4;
+ uint32_t sourceId;
+
+ uint32_t rsv3;
+
+ // Record data about the call to allocate a buffer.
+ uint64_t errorClass;
+ uint64_t correlator;
+
+ //P7IOC MMIO Error Regs
+ uint64_t p7iocPlssr; // n120
+ uint64_t p7iocCsr; // n110
+ uint64_t lemFir; // nC00
+ uint64_t lemErrorMask; // nC18
+ uint64_t lemWOF; // nC40
+ uint64_t phbErrorStatus; // nC80
+ uint64_t phbFirstErrorStatus; // nC88
+ uint64_t phbErrorLog0; // nCC0
+ uint64_t phbErrorLog1; // nCC8
+ uint64_t mmioErrorStatus; // nD00
+ uint64_t mmioFirstErrorStatus; // nD08
+ uint64_t mmioErrorLog0; // nD40
+ uint64_t mmioErrorLog1; // nD48
+ uint64_t dma0ErrorStatus; // nD80
+ uint64_t dma0FirstErrorStatus; // nD88
+ uint64_t dma0ErrorLog0; // nDC0
+ uint64_t dma0ErrorLog1; // nDC8
+ uint64_t dma1ErrorStatus; // nE00
+ uint64_t dma1FirstErrorStatus; // nE08
+ uint64_t dma1ErrorLog0; // nE40
+ uint64_t dma1ErrorLog1; // nE48
+ uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ uint32_t brdgCtl;
+
+ /* PHB3 UTL regs */
+ uint32_t portStatusReg;
+ uint32_t rootCmplxStatus;
+ uint32_t busAgentStatus;
+
+ /* PHB3 cfg regs */
+ uint32_t deviceStatus;
+ uint32_t slotStatus;
+ uint32_t linkStatus;
+ uint32_t devCmdStatus;
+ uint32_t devSecStatus;
+
+ /* cfg AER regs */
+ uint32_t rootErrorStatus;
+ uint32_t uncorrErrorStatus;
+ uint32_t corrErrorStatus;
+ uint32_t tlpHdr1;
+ uint32_t tlpHdr2;
+ uint32_t tlpHdr3;
+ uint32_t tlpHdr4;
+ uint32_t sourceId;
+
+ uint32_t rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ uint64_t errorClass;
+ uint64_t correlator;
+
+ /* PHB3 MMIO Error Regs */
+ uint64_t nFir; /* 000 */
+ uint64_t nFirMask; /* 003 */
+ uint64_t nFirWOF; /* 008 */
+ uint64_t phbPlssr; /* 120 */
+ uint64_t phbCsr; /* 110 */
+ uint64_t lemFir; /* C00 */
+ uint64_t lemErrorMask; /* C18 */
+ uint64_t lemWOF; /* C40 */
+ uint64_t phbErrorStatus; /* C80 */
+ uint64_t phbFirstErrorStatus; /* C88 */
+ uint64_t phbErrorLog0; /* CC0 */
+ uint64_t phbErrorLog1; /* CC8 */
+ uint64_t mmioErrorStatus; /* D00 */
+ uint64_t mmioFirstErrorStatus; /* D08 */
+ uint64_t mmioErrorLog0; /* D40 */
+ uint64_t mmioErrorLog1; /* D48 */
+ uint64_t dma0ErrorStatus; /* D80 */
+ uint64_t dma0FirstErrorStatus; /* D88 */
+ uint64_t dma0ErrorLog0; /* DC0 */
+ uint64_t dma0ErrorLog1; /* DC8 */
+ uint64_t dma1ErrorStatus; /* E00 */
+ uint64_t dma1FirstErrorStatus; /* E08 */
+ uint64_t dma1ErrorLog0; /* E40 */
+ uint64_t dma1ErrorLog1; /* E48 */
+ uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
+ uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+enum {
+ OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
+ OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+};
+
+typedef struct oppanel_line {
+ const char *line;
+ uint64_t line_len;
+} oppanel_line_t;
+
+/*
+ * SG entries used for code update
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block whose base address is 4k aligned *and*
+ * whose size, except for the last entry in the list, is 4k
+ * aligned as well.
+ */
+struct opal_sg_entry {
+ void *data;
+ long length;
+};
+
+/*
+ * Candidate image SG list.
+ *
+ * length = VER | length
+ */
+struct opal_sg_list {
+ unsigned long length;
+ struct opal_sg_list *next;
+ struct opal_sg_entry entry[];
+};
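+
+/*
+ * Illustrative sketch of walking such a list (assumptions of this
+ * example: 'length' counts bytes including the 16-byte header once
+ * the VER bits have been masked off, and consume() is a hypothetical
+ * per-block handler):
+ *
+ *    for (sg = list; sg; sg = sg->next) {
+ *        n = (sg->length - 16) / sizeof(struct opal_sg_entry);
+ *        for (i = 0; i < n; i++)
+ *            consume(sg->entry[i].data, sg->entry[i].length);
+ *    }
+ */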
+
+
+/****** Internal **********/
+#include <skiboot.h>
+
+
+/*
+ * All the information regarding an error/event to be reported
+ * must be populated into this structure using the pre-defined
+ * interfaces only
+ */
+struct __attribute__((__packed__)) opal_errorlog {
+
+ uint16_t component_id;
+ uint8_t error_event_type;
+ uint8_t subsystem_id;
+
+ uint8_t event_severity;
+ uint8_t event_subtype;
+ uint8_t user_section_count;
+ uint8_t elog_origin;
+
+ uint32_t user_section_size;
+ uint32_t reason_code;
+ uint32_t additional_info[4];
+
+ char user_data_dump[OPAL_LOG_MAX_DUMP];
+ struct list_node link;
+};
+
+/* An opal table entry */
+struct opal_table_entry {
+ void *func;
+ uint32_t token;
+ uint32_t nargs;
+};
+
+#define opal_call(__tok, __func, __nargs) \
+static struct opal_table_entry __e_##__func __used __section(".opal_table") = \
+{ .func = __func, .token = __tok, \
+ .nargs = __nargs + 0 * sizeof(__func( __test_args##__nargs )) }
+
+/* Make sure functions take the args they claim. Look away now... */
+#define __test_args0
+#define __test_args1 0
+#define __test_args2 0,0
+#define __test_args3 0,0,0
+#define __test_args4 0,0,0,0
+#define __test_args5 0,0,0,0,0
+#define __test_args6 0,0,0,0,0,0
+#define __test_args7 0,0,0,0,0,0,0
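+
+/*
+ * Illustrative use of the check above (hypothetical token and
+ * handler): given
+ *
+ *    static int64_t opal_test_func(uint64_t arg);
+ *    opal_call(OPAL_TEST_TOKEN, opal_test_func, 1);
+ *
+ * the sizeof(__func(__test_args1)) term is never evaluated at
+ * runtime, but it fails to compile unless opal_test_func really
+ * takes exactly 1 argument.
+ */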
+
+extern struct opal_table_entry __opal_table_start[];
+extern struct opal_table_entry __opal_table_end[];
+
+extern uint64_t opal_pending_events;
+
+extern struct dt_node *opal_node;
+
+extern void opal_table_init(void);
+extern void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values);
+extern void add_opal_node(void);
+
+#define opal_register(token, func, nargs) \
+ __opal_register((token) + 0*sizeof(func(__test_args##nargs)), \
+ (func), (nargs))
+extern void __opal_register(uint64_t token, void *func, unsigned num_args);
+
+/* Warning: no locking at the moment, do at init time only
+ *
+ * XXX TODO: Add the big RCU-ish "opal API lock" to protect us here
+ * which will also be used for other things such as runtime updates
+ */
+extern void opal_add_poller(void (*poller)(void *data), void *data);
+extern void opal_del_poller(void (*poller)(void *data));
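+
+/*
+ * Illustrative usage (hypothetical poller, registered at init time
+ * per the locking caveat above):
+ *
+ *    static void my_poller(void *data) { ... }
+ *    opal_add_poller(my_poller, &my_state);
+ */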
+
+
+/*
+ * Warning: no locking, only call that from the init processor
+ */
+extern void opal_add_host_sync_notifier(bool (*notify)(void *data), void *data);
+extern void opal_del_host_sync_notifier(bool (*notify)(void *data));
+
+#endif /* __ASSEMBLY__ */
+#endif /* __OPAL_H */
diff --git a/include/p5ioc2-regs.h b/include/p5ioc2-regs.h
new file mode 100644
index 0000000..4cfd2e7
--- /dev/null
+++ b/include/p5ioc2-regs.h
@@ -0,0 +1,251 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __P5IOC2_REGS_H
+#define __P5IOC2_REGS_H
+
+/*
+ * IO HUB registers
+ *
+ * Most (if not all) of these registers support an AND access
+ * at address + 0x1000 and an OR access at address + 0x2000
+ */
+#define P5IOC2_REG_AND 0x1000
+#define P5IOC2_REG_OR 0x2000
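+
+/*
+ * Illustrative sketch (assuming the out_be64() MMIO accessor and a
+ * mapped register base 'regs'): clearing bits via the AND port and
+ * setting bits via the OR port of a register 'reg':
+ *
+ *    out_be64(regs + reg + P5IOC2_REG_AND, ~bits);
+ *    out_be64(regs + reg + P5IOC2_REG_OR, bits);
+ */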
+
+/* Internal BARs */
+#define P5IOC2_BAR0 0x0100
+#define P5IOC2_BAR1 0x0108
+#define P5IOC2_BAR2 0x0110
+#define P5IOC2_BAR3 0x0118
+#define P5IOC2_BAR4 0x0120
+#define P5IOC2_BAR5 0x0128
+#define P5IOC2_BAR6 0x0130
+#define P5IOC2_BAR7 0x0138
+#define P5IOC2_BARM0 0x0180
+#define P5IOC2_BARM1 0x0188
+#define P5IOC2_BARM2 0x0190
+#define P5IOC2_BARM3 0x0198
+#define P5IOC2_BARM4 0x01a0
+#define P5IOC2_BARM5 0x01a8
+#define P5IOC2_BARM6 0x01b0
+#define P5IOC2_BARM7 0x01b8
+#define P5IOC2_BAR(n) (0x100 + ((n) << 3))
+#define P5IOC2_BARM(n) (0x180 + ((n) << 3))
+
+/* Routing table */
+#define P5IOC2_TxRTE(x,n) (0x200 + ((x) << 7) + ((n) << 3))
+#define P5IOC2_TxRTE_VALID PPC_BIT(47)
+
+/* BUID routing table */
+#define P5IOC2_BUIDRTE(n) (0x600 + ((n) << 3))
+#define P5IOC2_BUIDRTE_VALID PPC_BIT(47)
+#define P5IOC2_BUIDRTE_RR_EOI PPC_BIT(48)
+#define P5IOC2_BUIDRTE_RR_RET PPC_BIT(49)
+
+/* Others */
+#define P5IOC2_FIRMC 0x0008 /* FIR Mask Checkstop */
+#define P5IOC2_CTL 0x0030 /* Control register part 1 */
+#define P5IOC2_CTL2 0x00c8 /* Control register part 2 */
+#define P5IOC2_DIRA 0x0090 /* Cache dir. address */
+#define P5IOC2_DIRD 0x0098 /* Cache dir. data */
+#define P5IOC2_IBASE 0x0048 /* Interrupt base address */
+#define P5IOC2_IRBM 0x00d8 /* Interrupt re-issue broadcast mask */
+#define P5IOC2_SID 0x0038 /* P5IOC2 ID register */
+#define P5IOC2_SID_BUID_BASE_MASK PPC_BITMASK(14,22)
+#define P5IOC2_SID_BUID_BASE_LSH PPC_BITLSHIFT(22)
+#define P5IOC2_SID_BUID_MASK_MASK PPC_BITMASK(27,30)
+#define P5IOC2_SID_BUID_MASK_LSH PPC_BITLSHIFT(30)
+#define P5IOC2_SBUID 0x00f8 /* P5IOC2 HUB BUID */
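+
+/*
+ * Illustrative sketch, assuming token-pasting GETFIELD()/SETFIELD()
+ * helpers of the usual (((v) & (m##_MASK)) >> (m##_LSH)) form, which
+ * is why each field above comes as a _MASK/_LSH pair:
+ *
+ *    sid = in_be64(regs + P5IOC2_SID);
+ *    buid_base = GETFIELD(P5IOC2_SID_BUID_BASE, sid);
+ */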
+
+/* XIPM area */
+#define P5IOC2_BUCO 0x40008
+#define P5IOC2_MIIP 0x40000
+#define P5IOC2_XINM 0x40010
+
+/* Xin/Xout area */
+#define P5IOC2_XIXO 0xf0030
+#define P5IOC2_XIXO_ENH_TCE PPC_BIT(0)
+
+/*
+ * Calgary registers
+ *
+ * CA0 is PCI-X and CA1 is PCIe, though the type can be discovered
+ * from the registers, so we simply rely on that rather than on the
+ * hard-wired assignment (see the sketch after the CA_PHBIDn
+ * definitions below)
+ */
+
+#define CA_CCR 0x108
+#define CA_DEVBUID 0x118
+#define CA_DEVBUID_MASK PPC_BITMASK32(7,15)
+#define CA_DEVBUID_LSH PPC_BITLSHIFT32(15)
+#define CA_TAR0 0x580
+#define CA_TAR_HUBID_MASK PPC_BITMASK(0,5)
+#define CA_TAR_HUBID_LSH PPC_BITLSHIFT(5)
+#define CA_TAR_ALTHUBID_MASK PPC_BITMASK(6,11)
+#define CA_TAR_ALTHUBID_LSH PPC_BITLSHIFT(11)
+#define CA_TAR_TCE_ADDR_MASK PPC_BITMASK(16,48)
+#define CA_TAR_TCE_ADDR_LSH PPC_BITLSHIFT(48)
+#define CA_TAR_VALID PPC_BIT(60)
+#define CA_TAR_NUM_TCE_MASK PPC_BITMASK(61,63)
+#define CA_TAR_NUM_TCE_LSH PPC_BITLSHIFT(63)
+#define CA_TAR1 0x588
+#define CA_TAR2 0x590
+#define CA_TAR3 0x598
+#define CA_TARn(n) (0x580 + ((n) << 3))
+
+#define CA_PHBID0 0x650
+#define CA_PHBID_PHB_ENABLE PPC_BIT32(0)
+#define CA_PHBID_ADDRSPACE_ENABLE PPC_BIT32(1)
+#define CA_PHBID_PHB_TYPE_MASK PPC_BITMASK32(4,7)
+#define CA_PHBID_PHB_TYPE_LSH PPC_BITLSHIFT32(7)
+#define CA_PHBTYPE_PCIX1_0 0
+#define CA_PHBTYPE_PCIX2_0 1
+#define CA_PHBTYPE_PCIE_G1 4
+#define CA_PHBTYPE_PCIE_G2 5
+/* PCI-X bits */
+#define CA_PHBID_XMODE_EMBEDDED PPC_BIT32(8)
+#define CA_PHBID_XBUS_64BIT PPC_BIT32(9)
+#define CA_PHBID_XBUS_266MHZ PPC_BIT32(10)
+/* PCI-E bits */
+#define CA_PHBID_EWIDTH_MASK PPC_BITMASK32(8,10)
+#define CA_PHBID_EWIDTH_LSH PPC_BITLSHIFT32(10)
+#define CA_PHB_EWIDTH_X4 0
+#define CA_PHB_EWIDTH_X8 1
+#define CA_PHB_EWIDTH_X16 2
+#define CA_PHBID1 0x658
+#define CA_PHBID2 0x660
+#define CA_PHBID3 0x668
+#define CA_PHBIDn(n) (0x650 + ((n) << 3))
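+
+/*
+ * Illustrative sketch of the type discovery mentioned above (assuming
+ * the token-pasting GETFIELD() helper and the in_be32() accessor;
+ * 'ca_regs' and 'is_pcie' are hypothetical):
+ *
+ *    phbid = in_be32(ca_regs + CA_PHBIDn(n));
+ *    is_pcie = GETFIELD(CA_PHBID_PHB_TYPE, phbid) >= CA_PHBTYPE_PCIE_G1;
+ */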
+
+/* PHB n reg base inside CA */
+#define CA_PHBn_REGS(n) (0x8000 + ((n) << 12))
+
+/*
+ * P5IOC2 PHB registers
+ */
+#define CAP_BUID 0x100
+#define CAP_BUID_MASK PPC_BITMASK32(7,15)
+#define CAP_BUID_LSH PPC_BITLSHIFT32(15)
+#define CAP_MSIBASE 0x108 /* Undocumented ! */
+#define CAP_DMACSR 0x110
+#define CAP_PLSSR 0x120
+#define CAP_PCADR 0x140
+#define CAP_PCADR_ENABLE PPC_BIT32(0)
+#define CAP_PCADR_FUNC_MASK PPC_BITMASK32(21,23)
+#define CAP_PCADR_FUNC_LSH PPC_BITLSHIFT32(23)
+#define CAP_PCADR_EXTOFF_MASK PPC_BITMASK32(4,7)
+#define CAP_PCADR_EXTOFF_LSH PPC_BITLSHIFT32(7)
+#define CAP_PCDAT 0x130
+#define CAP_PCFGRW 0x160
+#define CAP_PCFGRW_ERR_RECOV_EN PPC_BIT32(1)
+#define CAP_PCFGRW_TCE_EN PPC_BIT32(2)
+#define CAP_PCFGRW_FREEZE_EN PPC_BIT32(3)
+#define CAP_PCFGRW_MMIO_FROZEN PPC_BIT32(4)
+#define CAP_PCFGRW_DMA_FROZEN PPC_BIT32(5)
+#define CAP_PCFGRW_ENHANCED_CFG_EN PPC_BIT32(6)
+#define CAP_PCFGRW_DAC_DISABLE PPC_BIT32(7)
+#define CAP_PCFGRW_2ND_MEM_SPACE_EN PPC_BIT32(9)
+#define CAP_PCFGRW_MASK_PLSSR_IRQ PPC_BIT32(10)
+#define CAP_PCFGRW_MASK_CSR_IRQ PPC_BIT32(11)
+#define CAP_PCFGRW_IO_SPACE_DISABLE PPC_BIT32(12)
+#define CAP_PCFGRW_SZ_MASK_IS_LIMIT PPC_BIT32(13)
+#define CAP_PCFGRW_MSI_EN PPC_BIT32(14)
+#define CAP_IOAD_L 0x170
+#define CAP_IOAD_H 0x180
+#define CAP_MEM1_L 0x190
+#define CAP_MEM1_H 0x1a0
+#define CAP_IOSZ 0x1b0
+#define CAP_MSZ1 0x1c0
+#define CAP_MEM_ST 0x1d0
+#define CAP_IO_ST 0x1e0
+#define CAP_AER 0x200
+#define CAP_BPR 0x210
+#define CAP_CRR 0x270
+#define CAP_CRR_RESET1 PPC_BIT32(0)
+#define CAP_CRR_RESET2 PPC_BIT32(1)
+#define CAP_XIVR0 0x400
+#define CAP_XIVR_PRIO_MASK 0x000000ff
+#define CAP_XIVR_PRIO_LSH 0
+#define CAP_XIVR_SERVER_MASK 0x0000ff00
+#define CAP_XIVR_SERVER_LSH 8
+#define CAP_XIVRn(n) (0x400 + ((n) << 4))
+#define CAP_MVE0 0x500
+#define CAP_MVE_VALID PPC_BIT32(0)
+#define CAP_MVE_TBL_OFF_MASK PPC_BITMASK32(13,15)
+#define CAP_MVE_TBL_OFF_LSH PPC_BITLSHIFT32(15)
+#define CAP_MVE_NUM_INT_MASK PPC_BITMASK32(18,19)
+#define CAP_MVE_NUM_INT_LSH PPC_BITLSHIFT32(19)
+#define CAP_MVE1 0x510
+#define CAP_MODE0 0x880
+#define CAP_MODE1 0x890
+#define CAP_MODE2 0x8a0
+#define CAP_MODE3 0x8b0
+
+/*
+ * SHPC Registers
+ */
+#define SHPC_LOGICAL_SLOT 0xb40
+#define SHPC_LOGICAL_SLOT_STATE_MASK 0x00000003
+#define SHPC_LOGICAL_SLOT_STATE_LSH 0
+#define SHPC_SLOT_STATE_POWER_ONLY 1
+#define SHPC_SLOT_STATE_ENABLED 2
+#define SHPC_SLOT_STATE_DISABLED 3
+#define SHPC_LOGICAL_SLOT_PRSNT_MASK 0x00000c00
+#define SHPC_LOGICAL_SLOT_PRSNT_LSH 10
+#define SHPC_SLOT_PRSNT_7_5W 0
+#define SHPC_SLOT_PRSNT_25W 1
+#define SHPC_SLOT_PRSNT_15W 2
+#define SHPC_SLOT_PRSNT_EMPTY 3
+
+/* UTL registers */
+#define UTL_SYS_BUS_CONTROL 0xc00
+#define UTL_STATUS 0xc04
+#define UTL_SYS_BUS_AGENT_STATUS 0xc08
+#define UTL_SYS_BUS_AGENT_ERR_EN 0xc0c
+#define UTL_SYS_BUS_AGENT_IRQ_EN 0xc10
+#define UTL_SYS_BUS_BURST_SZ_CONF 0xc20
+#define UTL_REVISION_ID 0xc24
+#define UTL_TX_NON_POST_DEBUG_STAT1 0xc30
+#define UTL_TX_NON_POST_DEBUG_STAT2 0xc34
+#define UTL_GBIF_READ_REQ_DEBUG 0xc38
+#define UTL_GBIF_WRITE_REQ_DEBUG 0xc3c
+#define UTL_GBIF_TX_COMP_DEBUG 0xc40
+#define UTL_GBIF_RX_COMP_DEBUG 0xc44
+#define UTL_OUT_POST_HDR_BUF_ALLOC 0xc60
+#define UTL_OUT_POST_DAT_BUF_ALLOC 0xc68
+#define UTL_IN_POST_HDR_BUF_ALLOC 0xc70
+#define UTL_IN_POST_DAT_BUF_ALLOC 0xc78
+#define UTL_OUT_NP_BUF_ALLOC 0xc80
+#define UTL_IN_NP_BUF_ALLOC 0xc88
+#define UTL_PCIE_TAGS_ALLOC 0xc90
+#define UTL_GBIF_READ_TAGS_ALLOC 0xc98
+#define UTL_PCIE_PORT_CONTROL 0xca0
+#define UTL_PCIE_PORT_STATUS 0xca4
+#define UTL_PCIE_PORT_ERR_EN 0xca8
+#define UTL_PCIE_PORT_IRQ_EN 0xcac
+#define UTL_RC_STATUS 0xcb0
+#define UTL_RC_ERR_EN 0xcb4
+#define UTL_RC_IRQ_EN 0xcb8
+#define UTL_PCI_PM_CONTROL 0xcc8
+#define UTL_PCIE_PORT_ID 0xccc
+#define UTL_TLP_DEBUG 0xcd0
+#define UTL_VC_CTL_DEBUG 0xcd4
+#define UTL_NP_BUFFER_DEBUG 0xcd8
+#define UTL_POSTED_BUFFER_DEBUG 0xcdc
+#define UTL_TX_FIFO_DEBUG 0xce0
+#define UTL_TLP_COMPL_DEBUG 0xce4
+
+#endif /* __P5IOC2_REGS_H */
diff --git a/include/p5ioc2.h b/include/p5ioc2.h
new file mode 100644
index 0000000..fb9ed1b
--- /dev/null
+++ b/include/p5ioc2.h
@@ -0,0 +1,184 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __P5IOC2_H
+#define __P5IOC2_H
+
+#include <stdint.h>
+#include <cec.h>
+#include <io.h>
+#include <pci.h>
+#include <lock.h>
+#include <device.h>
+
+#include <ccan/container_of/container_of.h>
+
+/*
+ * Various definitions resulting from things we have
+ * hard-wired (routing etc...)
+ */
+
+/* It looks like our registers are at an offset from GX BAR 0 ... */
+#define P5IOC2_REGS_OFFSET 0x01F00000
+
+#define P5IOC2_CA0_REG_OFFSET 0 /* From BAR6, R0 */
+#define P5IOC2_CA1_REG_OFFSET 0x01000000 /* From BAR6, R1 */
+#define P5IOC2_CA0_MM_OFFSET 0 /* From BAR0, R0 and 1 */
+#define P5IOC2_CA1_MM_OFFSET 0x400000000ul /* From BAR0, R1 and 2 */
+#define P5IOC2_CA_PHB_COUNT 4
+#define P5IOC2_CA0_RIO_ID 2
+#define P5IOC2_CA1_RIO_ID 3
+#define P5IOC2_CA0_BUID 0x10
+#define P5IOC2_CA1_BUID 0x20
+
+/*
+ * Our memory space is laid out slightly differently from pHyp's
+ * (or even BML's). We do as follows:
+ *
+ * - IO space is in the Calgary MMIO, at (phb_index + 1) * 1M
+ * (pHyp seems to mangle the IO space location) and is always
+ * 1M in size, mapping to PCI address 0
+ *
+ * - Memory space is in the BAR0 mapped region. Each PHB gets
+ * allocated a 4G window at base + (phb_index * 4G). It uses
+ * a portion of that space based on the chosen size of the
+ * MMIO space, typically 2G.
+ *
+ * (a sketch follows the defines below)
+ */
+#define MM_WINDOW_SIZE 0x100000000ul
+#define MM_PCI_START 0x80000000
+#define MM_PCI_SIZE 0x80000000
+#define IO_PCI_START 0x00000000
+#define IO_PCI_SIZE 0x00100000
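+
+/*
+ * Illustrative sketch of the layout above, for PHB 'n' of a given CA
+ * ('CAx' stands for the matching CA0/CA1 constant; the names are
+ * pseudo-code, not part of this header):
+ *
+ *    io_base = bar6 + P5IOC2_CAx_REG_OFFSET + (n + 1) * IO_PCI_SIZE;
+ *    mm_base = bar0 + P5IOC2_CAx_MM_OFFSET + n * MM_WINDOW_SIZE;
+ */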
+
+/*
+ * CAn interrupts
+ *
+ * Within Calgary BUID space
+ */
+#define P5IOC2_CA_HOST_IRQ 0
+#define P5IOC2_CA_SPCN_IRQ 1
+#define P5IOC2_CA_PERF_IRQ 2
+
+/*
+ * The PHB states are similar to P7IOC, see the explanation
+ * in p7ioc.h
+ */
+enum p5ioc2_phb_state {
+ /* First init state */
+ P5IOC2_PHB_STATE_UNINITIALIZED,
+
+ /* During PHB HW inits */
+ P5IOC2_PHB_STATE_INITIALIZING,
+
+ /* Set if the PHB is for some reason unusable */
+ P5IOC2_PHB_STATE_BROKEN,
+
+ /* Normal PHB functional state */
+ P5IOC2_PHB_STATE_FUNCTIONAL,
+};
+
+/*
+ * Structure for a PHB
+ */
+
+struct p5ioc2;
+
+struct p5ioc2_phb {
+ bool active; /* Is this PHB functional ? */
+ bool is_pcie;
+ uint8_t ca; /* CA0 or CA1 */
+ uint8_t index; /* 0..3 index inside CA */
+ void *ca_regs; /* Calgary regs */
+ void *regs; /* PHB regs */
+ struct lock lock;
+ uint32_t buid;
+ uint64_t mm_base;
+ uint64_t io_base;
+ int64_t ecap; /* cached PCI-E cap offset */
+ int64_t aercap; /* cached AER ecap offset */
+ enum p5ioc2_phb_state state;
+ uint64_t delay_tgt_tb;
+ uint64_t retries;
+ uint64_t xive_cache[16];
+ struct p5ioc2 *ioc;
+ struct phb phb;
+};
+
+static inline struct p5ioc2_phb *phb_to_p5ioc2_phb(struct phb *phb)
+{
+ return container_of(phb, struct p5ioc2_phb, phb);
+}
+
+extern void p5ioc2_phb_setup(struct p5ioc2 *ioc, struct p5ioc2_phb *p,
+ uint8_t ca, uint8_t index, bool active,
+ uint32_t buid);
+
+/*
+ * State structure for P5IOC2 IO HUB
+ */
+struct p5ioc2 {
+ /* Device node */
+ struct dt_node *dt_node;
+
+ /* MMIO regs for the chip */
+ void *regs;
+
+ /* BAR6 (matches GX BAR 1) is used for internal Calgary MMIO and
+ * for PCI IO space.
+ */
+ uint64_t bar6;
+
+ /* BAR0 (matches GX BAR 2) is used for PCI memory space */
+ uint64_t bar0;
+
+ /* Calgary 0 and 1 registers. We assume their BBAR values are set
+  * such that CA0 is at bar6 and CA1 at bar6 + 16M
+  */
+ void *ca0_regs;
+ void *ca1_regs;
+
+ /* The large MM regions assigned off bar0 to CA0 and CA1 for use
+ * by their PHBs (16G each)
+ */
+ uint64_t ca0_mm_region;
+ uint64_t ca1_mm_region;
+
+ /* BUID base for the PHB. This does include the top bits
+ * (chip, GX bus ID, etc...). This is initialized from the
+ * SPIRA.
+ */
+ uint32_t buid_base;
+
+ /* TCE region set by the user */
+ uint64_t tce_base;
+ uint64_t tce_size;
+
+ /* Calgary 0 and 1 PHBs */
+ struct p5ioc2_phb ca0_phbs[P5IOC2_CA_PHB_COUNT];
+ struct p5ioc2_phb ca1_phbs[P5IOC2_CA_PHB_COUNT];
+
+ uint32_t host_chip;
+ uint32_t gx_bus;
+ struct io_hub hub;
+};
+
+static inline struct p5ioc2 *iohub_to_p5ioc2(struct io_hub *hub)
+{
+ return container_of(hub, struct p5ioc2, hub);
+}
+
+#endif /* __P5IOC2_H */
diff --git a/include/p7ioc-regs.h b/include/p7ioc-regs.h
new file mode 100644
index 0000000..daac1eb
--- /dev/null
+++ b/include/p7ioc-regs.h
@@ -0,0 +1,472 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __P7IOC_REGS_H
+#define __P7IOC_REGS_H
+
+/*
+ * Register definitions
+ *
+ * We only define some registers here. Ideally we should auto-generate
+ * the full list from the spec. For now I add them as I need them
+ */
+
+/* RGC GEM registers */
+#define P7IOC_GEM_XFIR 0x3E0008
+#define P7IOC_GEM_RFIR 0x3E0010
+#define P7IOC_GEM_RIRQFIR 0x3E0018
+#define P7IOC_GEM_MASK 0x3E0020
+#define P7IOC_GEM_RWOF 0x3E0028
+
+/* LEM register base */
+#define P7IOC_RGC_LEM_BASE 0x3E1E00
+#define P7IOC_BI_UP_LEM_BASE 0x3C0000
+#define P7IOC_BI_DOWN_LEM_BASE 0x3C0050
+#define P7IOC_CI_PORTn_LEM_BASE(n) (0x3d0200 | ((n) * 0x1000))
+#define P7IOC_PHBn_LEM_BASE(n) (0x000C00 | ((n) * 0x10000))
+#define P7IOC_MISC_LEM_BASE 0x3EA000
+#define P7IOC_I2C_LEM_BASE 0x3EB000
+
+/* LEM register offset */
+#define P7IOC_LEM_FIR_OFFSET 0x00
+#define P7IOC_LEM_FIR_AND_OFFSET 0x08
+#define P7IOC_LEM_FIR_OR_OFFSET 0x10
+#define P7IOC_LEM_ERR_MASK_OFFSET 0x18
+#define P7IOC_LEM_ERR_MASK_AND_OFFSET 0x20
+#define P7IOC_LEM_ERR_MASK_OR_OFFSET 0x28
+#define P7IOC_LEM_ACTION_0_OFFSET 0x30
+#define P7IOC_LEM_ACTION_1_OFFSET 0x38
+#define P7IOC_LEM_WOF_OFFSET 0x40
+
+/* HSS registers */
+#define P7IOC_HSS_BASE 0x3E8000
+#define P7IOC_HSS_STRIDE 0x200
+#define P7IOC_HSSn_CTL2_OFFSET 0x10
+#define P7IOC_HSSn_CTL3_OFFSET 0x18
+#define P7IOC_HSSn_CTL8_OFFSET 0x40
+#define P7IOC_HSSn_CTL9_OFFSET 0x48
+#define P7IOC_HSSn_CTL10_OFFSET 0x50
+#define P7IOC_HSSn_CTL11_OFFSET 0x58
+#define P7IOC_HSSn_CTL12_OFFSET 0x60
+#define P7IOC_HSSn_CTL13_OFFSET 0x68
+#define P7IOC_HSSn_CTL14_OFFSET 0x70
+#define P7IOC_HSSn_CTL15_OFFSET 0x78
+#define P7IOC_HSSn_CTL16_OFFSET 0x80
+#define P7IOC_HSSn_CTL17_OFFSET 0x88
+#define P7IOC_HSSn_CTL18_OFFSET 0x90
+#define P7IOC_HSSn_CTL19_OFFSET 0x98
+#define P7IOC_HSSn_CTL20_OFFSET 0xa0
+#define P7IOC_HSSn_CTL21_OFFSET 0xa8
+#define P7IOC_HSSn_CTL22_OFFSET 0xb0
+#define P7IOC_HSSn_CTL23_OFFSET 0xb8
+
+/* CI Routing registers & helper macros */
+#define P7IOC_CI_RMATC_REG(i) (0x3D0400ul + ((i) << 4))
+#define P7IOC_CI_RMASK_REG(i) (0x3D0408ul + ((i) << 4))
+
+#define P7IOC_CI_RMATC_PORT(n) PPC_BIT(n)
+#define P7IOC_CI_RMATC_ADDR_VALID PPC_BIT(16)
+#define P7IOC_CI_RMATC_BUID_VALID PPC_BIT(17)
+#define P7IOC_CI_RMATC_TYPE_VALID PPC_BIT(18)
+
+/* AIB addresses are 48 bits; the top 32 are used in
+ * the routing tables, so we shift by 16
+ */
+#define P7IOC_CI_RMATC_ENCODE_ADDR(addr) ((uint32_t)((addr) >> 16))
+#define P7IOC_CI_RMATC_ENCODE_BUID(buid) ((uint32_t)((buid) << 20))
+#define P7IOC_CI_RMATC_ENCODE_TYPE(type) ((uint32_t)(type))
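+
+/*
+ * Worked example: the 48-bit AIB address 0x123456780000 encodes as
+ * P7IOC_CI_RMATC_ENCODE_ADDR(0x123456780000ul) = 0x12345678, i.e.
+ * the top 32 address bits.
+ */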
+
+/* CI port numbers */
+#define P7IOC_CI_PHB_PORT(pnum) ((pnum) + 2)
+#define P7IOC_CI_UPSTREAM_PORT 0
+#define P7IOC_CI_RGC_PORT 1
+
+/* Other random chip registers */
+#define P7IOC_CHIP_FENCE_SHADOW 0x3ec010
+#define P7IOC_CHIP_FENCE_WOF 0x3ec018
+#define P7IOC_CCRR 0x3e1c00
+
+/* CI registers */
+#define P7IOC_CIn_BASE(n) (0x3d0000 | ((n) * 0x1000))
+#define P7IOC_CIn_LEM_FIR(n) (P7IOC_CIn_BASE(n) + 0x200)
+#define P7IOC_CIn_LEM_FIR_AND(n) (P7IOC_CIn_BASE(n) + 0x208)
+#define P7IOC_CIn_LEM_FIR_OR(n) (P7IOC_CIn_BASE(n) + 0x210)
+#define P7IOC_CIn_LEM_ERR_MASK(n) (P7IOC_CIn_BASE(n) + 0x218)
+#define P7IOC_CIn_LEM_ERR_MASK_AND(n) (P7IOC_CIn_BASE(n) + 0x220)
+#define P7IOC_CIn_LEM_ERR_MASK_OR(n) (P7IOC_CIn_BASE(n) + 0x228)
+
+/*
+ * PHB registers
+ */
+
+/* PHB Fundamental register set A */
+#define PHB_BUID 0x100
+#define PHB_BUID_LSI_MASK PPC_BITMASK(7,15)
+#define PHB_BUID_LSI_LSH PPC_BITLSHIFT(15)
+#define PHB_BUID_MSI_MASK PPC_BITMASK(23,31)
+#define PHB_BUID_MSI_LSH PPC_BITLSHIFT(31)
+#define PHB_DMA_CHAN_STATUS 0x110
+#define PHB_CPU_LOADSTORE_STATUS 0x120
+#define PHB_CONFIG_DATA 0x130
+#define PHB_LOCK0 0x138
+#define PHB_CONFIG_ADDRESS 0x140
+#define PHB_CA_ENABLE PPC_BIT(0)
+#define PHB_CA_BUS_MASK PPC_BITMASK(4,11)
+#define PHB_CA_BUS_LSH PPC_BITLSHIFT(11)
+#define PHB_CA_DEV_MASK PPC_BITMASK(12,16)
+#define PHB_CA_DEV_LSH PPC_BITLSHIFT(16)
+#define PHB_CA_FUNC_MASK PPC_BITMASK(17,19)
+#define PHB_CA_FUNC_LSH PPC_BITLSHIFT(19)
+#define PHB_CA_REG_MASK PPC_BITMASK(20,31)
+#define PHB_CA_REG_LSH PPC_BITLSHIFT(31)
+#define PHB_LOCK1 0x148
+#define PHB_PHB2_CONFIG 0x160
+#define PHB_PHB2C_64B_TCE_EN PPC_BIT(2)
+#define PHB_PHB2C_32BIT_MSI_EN PPC_BIT(8)
+#define PHB_PHB2C_IO_EN PPC_BIT(12)
+#define PHB_PHB2C_64BIT_MSI_EN PPC_BIT(14)
+#define PHB_PHB2C_M32_EN PPC_BIT(16)
+#define PHB_IO_BASE_ADDR 0x170
+#define PHB_IO_BASE_MASK 0x178
+#define PHB_IO_START_ADDR 0x180
+#define PHB_M32_BASE_ADDR 0x190
+#define PHB_M32_BASE_MASK 0x198
+#define PHB_M32_START_ADDR 0x1a0
+#define PHB_M64_UPPER_BITS 0x1f0
+#define PHB_TCE_KILL 0x210
+#define PHB_TCEKILL_PAIR PPC_BIT(0)
+#define PHB_TCEKILL_ADDR_MASK PPC_BITMASK(16,59)
+#define PHB_TCE_PREFETCH 0x218
+#define PHB_IODA_ADDR 0x220
+#define PHB_IODA_AD_AUTOINC PPC_BIT(0)
+#define PHB_IODA_AD_TSEL_MASK PPC_BITMASK(11,15)
+#define PHB_IODA_AD_TSEL_LSH PPC_BITLSHIFT(15)
+#define PHB_IODA_AD_TADR_MASK PPC_BITMASK(48,63)
+#define PHB_IODA_AD_TADR_LSH PPC_BITLSHIFT(63)
+#define PHB_IODA_DATA0 0x228
+#define PHB_IODA_DATA1 0x230
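+
+/*
+ * Illustrative sketch of the indirect IODA access scheme (assuming
+ * the token-pasting SETFIELD() helper and the out_be64() accessor;
+ * IODA_TBL_PELTM and the table sizes are defined further down):
+ *
+ *    ad = PHB_IODA_AD_AUTOINC;
+ *    ad = SETFIELD(PHB_IODA_AD_TSEL, ad, IODA_TBL_PELTM);
+ *    ad = SETFIELD(PHB_IODA_AD_TADR, ad, 0);
+ *    out_be64(regs + PHB_IODA_ADDR, ad);
+ *    for (i = 0; i < nr_entries; i++)
+ *        out_be64(regs + PHB_IODA_DATA0, entry[i]);
+ */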
+#define PHB_LOCK2 0x240
+#define PHB_XIVE_UPDATE 0x248
+#define PHB_PHB2_GEN_CAP 0x250
+#define PHB_PHB2_TCE_CAP 0x258
+#define PHB_PHB2_IRQ_CAP 0x260
+#define PHB_PHB2_EEH_CAP 0x268
+#define PHB_PAPR_ERR_INJ_CONTROL 0x2b0
+#define PHB_PAPR_ERR_INJ_ADDR 0x2b8
+#define PHB_PAPR_ERR_INJ_MASK 0x2c0
+#define PHB_ETU_ERR_SUMMARY 0x2c8
+
+/* UTL registers */
+#define UTL_SYS_BUS_CONTROL 0x400
+#define UTL_STATUS 0x408
+#define UTL_SYS_BUS_AGENT_STATUS 0x410
+#define UTL_SYS_BUS_AGENT_ERR_SEVERITY 0x418
+#define UTL_SYS_BUS_AGENT_IRQ_EN 0x420
+#define UTL_SYS_BUS_BURST_SZ_CONF 0x440
+#define UTL_REVISION_ID 0x448
+#define UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
+#define UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
+#define UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
+#define UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
+#define UTL_OUT_NP_BUF_ALLOC 0x500
+#define UTL_IN_NP_BUF_ALLOC 0x510
+#define UTL_PCIE_TAGS_ALLOC 0x520
+#define UTL_GBIF_READ_TAGS_ALLOC 0x530
+#define UTL_PCIE_PORT_CONTROL 0x540
+#define UTL_PCIE_PORT_STATUS 0x548
+#define UTL_PCIE_PORT_ERROR_SEV 0x550
+#define UTL_PCIE_PORT_IRQ_EN 0x558
+#define UTL_RC_STATUS 0x560
+#define UTL_RC_ERR_SEVERITY 0x568
+#define UTL_RC_IRQ_EN 0x570
+#define UTL_EP_STATUS 0x578
+#define UTL_EP_ERR_SEVERITY 0x580
+#define UTL_EP_ERR_IRQ_EN 0x588
+#define UTL_PCI_PM_CTRL1 0x590
+#define UTL_PCI_PM_CTRL2 0x598
+#define UTL_GP_CTL1 0x5a0
+#define UTL_GP_CTL2 0x5a8
+
+/* PCI-E Stack registers */
+#define PHB_PCIE_SYSTEM_CONFIG 0x600
+#define PHB_PCIE_BUS_NUMBER 0x608
+#define PHB_PCIE_SYSTEM_TEST 0x618
+#define PHB_PCIE_LINK_MANAGEMENT 0x630
+#define PHB_PCIE_DLP_TRAIN_CTL 0x640
+#define PHB_PCIE_DLP_TCTX_DISABLE PPC_BIT(1)
+#define PHB_PCIE_DLP_TCRX_DISABLED PPC_BIT(16)
+#define PHB_PCIE_DLP_TC_DL_LINKUP PPC_BIT(21)
+#define PHB_PCIE_DLP_TC_DL_PGRESET PPC_BIT(22)
+#define PHB_PCIE_DLP_TC_DL_LINKACT PPC_BIT(23)
+#define PHB_PCIE_SLOP_LOOPBACK_STATUS 0x648
+#define PHB_PCIE_AER_CONTROL 0x650
+#define PHB_PCIE_AUX_POWER_CONTROL 0x658
+#define PHB_PCIE_SLOTCTL1 0x660
+#define PHB_PCIE_SLOTCTL2 0x668
+#define PHB_PCIE_SLOTCTL2_SLOTWAKE PPC_BIT(16)
+#define PHB_PCIE_SLOTCTL2_PWR_EN_STAT PPC_BIT(17)
+#define PHB_PCIE_SLOTCTL2_RCK_EN_STAT PPC_BIT(18)
+#define PHB_PCIE_SLOTCTL2_PERST_STAT PPC_BIT(19)
+#define PHB_PCIE_SLOTCTL2_PLED_S_MASK PPC_BITMASK(20,21)
+#define PHB_PCIE_SLOTCTL2_PLED_S_LSH PPC_BITLSHIFT(21) /* use PCIE_INDIC_* */
+#define PHB_PCIE_SLOTCTL2_ALED_S_MASK PPC_BITMASK(22,23)
+#define PHB_PCIE_SLOTCTL2_ALED_S_LSH PPC_BITLSHIFT(23)
+#define PHB_PCIE_SLOTCTL2_PRSTN_STAT PPC_BIT(24)
+#define PHB_PCIE_SLOTCTL2_PWRFLT_STAT PPC_BIT(25)
+#define PHB_PCIE_UTL_CONFIG 0x670
+#define PHB_PCIE_DLP_CONTROL 0x678
+#define PHB_PCIE_UTL_ERRLOG1 0x680
+#define PHB_PCIE_UTL_ERRLOG2 0x688
+#define PHB_PCIE_UTL_ERRLOG3 0x690
+#define PHB_PCIE_UTL_ERRLOG4 0x698
+#define PHB_PCIE_DLP_ERRLOG1 0x6a0
+#define PHB_PCIE_DLP_ERRLOG2 0x6a8
+#define PHB_PCIE_UTL_ERR_INJECT 0x6c0
+#define PHB_PCIE_TLDLP_ERR_INJECT 0x6c8
+#define PHB_PCIE_STRAPPING 0x700
+
+/* Fundamental register set B */
+#define PHB_VERSION 0x800
+#define PHB_RESET 0x808
+#define PHB_CONTROL 0x810
+#define PHB_AIB_RX_CRED_INIT_TIMER 0x818
+#define PHB_AIB_RX_CMD_CRED 0x820
+#define PHB_AIB_RX_DATA_CRED 0x828
+#define PHB_AIB_TX_CMD_CRED 0x830
+#define PHB_AIB_TX_DATA_CRED 0x838
+#define PHB_AIB_TX_CHAN_MAPPING 0x840
+#define PHB_AIB_TX_CRED_SYNC_CTRL 0x848
+#define PHB_LEGACY_CTRL 0x850
+#define PHB_AIB_TAG_ENABLE 0x858
+#define PHB_AIB_FENCE_CTRL 0x860
+#define PHB_TCE_TAG_ENABLE 0x868
+#define PHB_TCE_WATERMARK 0x870
+#define PHB_TIMEOUT_CTRL1 0x878
+#define PHB_TIMEOUT_CTRL2 0x880
+#define PHB_QUIESCE_DMA_G 0x888
+#define PHB_AIB_TAG_STATUS 0x900
+#define PHB_TCE_TAG_STATUS 0x908
+
+/* FIR & Error registers */
+#define PHB_LEM_FIR_ACCUM 0xc00
+#define PHB_LEM_FIR_AND_MASK 0xc08
+#define PHB_LEM_FIR_OR_MASK 0xc10
+#define PHB_LEM_ERROR_MASK 0xc18
+#define PHB_LEM_ERROR_AND_MASK 0xc20
+#define PHB_LEM_ERROR_OR_MASK 0xc28
+#define PHB_LEM_ACTION0 0xc30
+#define PHB_LEM_ACTION1 0xc38
+#define PHB_LEM_WOF 0xc40
+#define PHB_ERR_STATUS 0xc80
+#define PHB_ERR1_STATUS 0xc88
+#define PHB_ERR_INJECT 0xc90
+#define PHB_ERR_LEM_ENABLE 0xc98
+#define PHB_ERR_IRQ_ENABLE 0xca0
+#define PHB_ERR_FREEZE_ENABLE 0xca8
+#define PHB_ERR_AIB_FENCE_ENABLE 0xcb0
+#define PHB_ERR_LOG_0 0xcc0
+#define PHB_ERR_LOG_1 0xcc8
+#define PHB_ERR_STATUS_MASK 0xcd0
+#define PHB_ERR1_STATUS_MASK 0xcd8
+
+#define PHB_OUT_ERR_STATUS 0xd00
+#define PHB_OUT_ERR1_STATUS 0xd08
+#define PHB_OUT_ERR_INJECT 0xd10
+#define PHB_OUT_ERR_LEM_ENABLE 0xd18
+#define PHB_OUT_ERR_IRQ_ENABLE 0xd20
+#define PHB_OUT_ERR_FREEZE_ENABLE 0xd28
+#define PHB_OUT_ERR_AIB_FENCE_ENABLE 0xd30
+#define PHB_OUT_ERR_LOG_0 0xd40
+#define PHB_OUT_ERR_LOG_1 0xd48
+#define PHB_OUT_ERR_STATUS_MASK 0xd50
+#define PHB_OUT_ERR1_STATUS_MASK 0xd58
+
+#define PHB_INA_ERR_STATUS 0xd80
+#define PHB_INA_ERR1_STATUS 0xd88
+#define PHB_INA_ERR_INJECT 0xd90
+#define PHB_INA_ERR_LEM_ENABLE 0xd98
+#define PHB_INA_ERR_IRQ_ENABLE 0xda0
+#define PHB_INA_ERR_FREEZE_ENABLE 0xda8
+#define PHB_INA_ERR_AIB_FENCE_ENABLE 0xdb0
+#define PHB_INA_ERR_LOG_0 0xdc0
+#define PHB_INA_ERR_LOG_1 0xdc8
+#define PHB_INA_ERR_STATUS_MASK 0xdd0
+#define PHB_INA_ERR1_STATUS_MASK 0xdd8
+
+#define PHB_INB_ERR_STATUS 0xe00
+#define PHB_INB_ERR1_STATUS 0xe08
+#define PHB_INB_ERR_INJECT 0xe10
+#define PHB_INB_ERR_LEM_ENABLE 0xe18
+#define PHB_INB_ERR_IRQ_ENABLE 0xe20
+#define PHB_INB_ERR_FREEZE_ENABLE 0xe28
+#define PHB_INB_ERR_AIB_FENCE_ENABLE 0xe30
+#define PHB_INB_ERR_LOG_0 0xe40
+#define PHB_INB_ERR_LOG_1 0xe48
+#define PHB_INB_ERR_STATUS_MASK 0xe50
+#define PHB_INB_ERR1_STATUS_MASK 0xe58
+
+/* Performance monitor & Debug registers */
+#define PHB_TRACE_CONTROL 0xf80
+#define PHB_PERFMON_CONFIG 0xf88
+#define PHB_PERFMON_CTR0 0xf90
+#define PHB_PERFMON_CTR1 0xf98
+#define PHB_PERFMON_CTR2 0xfa0
+#define PHB_PERFMON_CTR3 0xfa8
+#define PHB_HOTPLUG_OVERRIDE 0xfb0
+
+/*
+ * IODA tables
+ */
+
+#define IODA_TBL_HRT 0
+#define IODA_TBL_LIST 1
+#define IODA_TBL_LXIVT 2
+#define IODA_TBL_MIST 3
+#define IODA_TBL_MXIVT 4
+#define IODA_TBL_MVT 5
+#define IODA_TBL_PELTM 6
+#define IODA_TBL_PESTA 7
+#define IODA_TBL_PESTB 8
+#define IODA_TBL_TVT 9
+#define IODA_TBL_TCAM 10
+#define IODA_TBL_TDR 11
+#define IODA_TBL_PELTV 12
+#define IODA_TBL_M64BT 16
+#define IODA_TBL_IODT 17
+#define IODA_TBL_M32DT 18
+#define IODA_TBL_M64DT 19
+#define IODA_TBL_PEEV 20
+
+/* L/M XIVT */
+#define IODA_XIVT_SERVER_MASK PPC_BITMASK(8,23)
+#define IODA_XIVT_SERVER_LSH PPC_BITLSHIFT(23)
+#define IODA_XIVT_PRIORITY_MASK PPC_BITMASK(24,31)
+#define IODA_XIVT_PRIORITY_LSH PPC_BITLSHIFT(31)
+#define IODA_XIVT_PENUM_MASK PPC_BITMASK(41,47)
+#define IODA_XIVT_PENUM_LSH PPC_BITLSHIFT(47)
+#define IODA_XIVT_HUBNUM_MASK PPC_BITMASK(58,59)
+#define IODA_XIVT_HUBNUM_LSH PPC_BITLSHIFT(59)
+
+/* M64BT */
+#define IODA_M64BT_ENABLE PPC_BIT(0)
+#define IODA_M64BT_BASE_MASK PPC_BITMASK(8,31)
+#define IODA_M64BT_BASE_LSH PPC_BITLSHIFT(31)
+#define IODA_M64BT_MASK_MASK PPC_BITMASK(40,63)
+#define IODA_M64BT_MASK_LSH PPC_BITLSHIFT(63)
+
+/* IODT/M32DT/M64DX */
+#define IODA_XXDT_PE_MASK PPC_BITMASK(0,6)
+#define IODA_XXDT_PE_LSH PPC_BITLSHIFT(6)
+
+/* PELTM */
+#define IODA_PELTM_BUS_MASK PPC_BITMASK(0,7)
+#define IODA_PELTM_BUS_LSH PPC_BITLSHIFT(7)
+#define IODA_PELTM_DEV_MASK PPC_BITMASK(8,12)
+#define IODA_PELTM_DEV_LSH PPC_BITLSHIFT(12)
+#define IODA_PELTM_FUNC_MASK PPC_BITMASK(13,15)
+#define IODA_PELTM_FUNC_LSH PPC_BITLSHIFT(15)
+#define IODA_PELTM_BUS_VALID_MASK PPC_BITMASK(16,18)
+#define IODA_PELTM_BUS_VALID_LSH PPC_BITLSHIFT(18)
+#define IODA_BUS_VALID_ANY 0
+#define IODA_BUS_VALID_3_BITS 2
+#define IODA_BUS_VALID_4_BITS 3
+#define IODA_BUS_VALID_5_BITS 4
+#define IODA_BUS_VALID_6_BITS 5
+#define IODA_BUS_VALID_7_BITS 6
+#define IODA_BUS_VALID_ALL 7
+#define IODA_PELTM_DEV_VALID PPC_BIT(19)
+#define IODA_PELTM_FUNC_VALID PPC_BIT(20)
+
+/* TVT */
+#define IODA_TVT0_TABLE_ADDR_MASK PPC_BITMASK(0,47)
+#define IODA_TVT0_TABLE_ADDR_LSH PPC_BITLSHIFT(47)
+#define IODA_TVT0_BUS_VALID_MASK PPC_BITMASK(48,50)
+#define IODA_TVT0_BUS_VALID_LSH PPC_BITLSHIFT(50)
+#define IODA_TVT0_TCE_TABLE_SIZE_MASK PPC_BITMASK(51,55)
+#define IODA_TVT0_TCE_TABLE_SIZE_LSH PPC_BITLSHIFT(55)
+#define IODA_TVT0_BUS_NUM_MASK PPC_BITMASK(56,63)
+#define IODA_TVT0_BUS_NUM_LSH PPC_BITLSHIFT(63)
+#define IODA_TVT1_DEV_VALID PPC_BIT(2)
+#define IODA_TVT1_DEV_NUM_MASK PPC_BITMASK(3,7)
+#define IODA_TVT1_DEV_NUM_LSH PPC_BITLSHIFT(7)
+#define IODA_TVT1_HUB_NUM_MASK PPC_BITMASK(10,11)
+#define IODA_TVT1_HUB_NUM_LSH PPC_BITLSHIFT(11)
+#define IODA_TVT1_FUNC_VALID PPC_BIT(12)
+#define IODA_TVT1_FUNC_NUM_MASK PPC_BITMASK(13,15)
+#define IODA_TVT1_FUNC_NUM_LSH PPC_BITLSHIFT(15)
+#define IODA_TVT1_IO_PSIZE_MASK PPC_BITMASK(19,23)
+#define IODA_TVT1_IO_PSIZE_LSH PPC_BITLSHIFT(23)
+#define IODA_TVT1_PE_NUM_MASK PPC_BITMASK(57,63)
+#define IODA_TVT1_PE_NUM_LSH PPC_BITLSHIFT(63)
+
+/* MVT */
+#define IODA_MVT_VALID PPC_BIT(0)
+#define IODA_MVT_BUS_VALID_MASK PPC_BITMASK(21,23)
+#define IODA_MVT_BUS_VALID_LSH PPC_BITLSHIFT(23)
+#define IODA_MVT_BUS_NUM_MASK PPC_BITMASK(24,31)
+#define IODA_MVT_BUS_NUM_LSH PPC_BITLSHIFT(31)
+#define IODA_MVT_PE_NUM_MASK PPC_BITMASK(41,47)
+#define IODA_MVT_PE_NUM_LSH PPC_BITLSHIFT(47)
+#define IODA_MVT_DEV_VALID PPC_BIT(50)
+#define IODA_MVT_DEV_NUM_MASK PPC_BITMASK(51,55)
+#define IODA_MVT_DEV_NUM_LSH PPC_BITLSHIFT(55)
+#define IODA_MVT_FUNC_VALID PPC_BIT(60)
+#define IODA_MVT_FUNC_NUM_MASK PPC_BITMASK(61,63)
+#define IODA_MVT_FUNC_NUM_LSH PPC_BITLSHIFT(63)
+
+/* PESTA */
+#define IODA_PESTA_MMIO_FROZEN PPC_BIT(0)
+#define IODA_PESTA_MMIO_CAUSE PPC_BIT(2)
+#define IODA_PESTA_CFG_READ PPC_BIT(3)
+#define IODA_PESTA_CFG_WRITE PPC_BIT(4)
+#define IODA_PESTA_TTYPE_MASK PPC_BITMASK(5,7)
+#define IODA_PESTA_TTYPE_LSH PPC_BITLSHIFT(7)
+#define PESTA_TTYPE_DMA_WRITE 0
+#define PESTA_TTYPE_MSI 1
+#define PESTA_TTYPE_DMA_READ 2
+#define PESTA_TTYPE_DMA_READ_RESP 3
+#define PESTA_TTYPE_MMIO_LOAD 4
+#define PESTA_TTYPE_MMIO_STORE 5
+#define PESTA_TTYPE_OTHER 7
+#define IODA_PESTA_CA_RETURN PPC_BIT(8)
+#define IODA_PESTA_UTL_RTOS_TIMEOUT PPC_BIT(8) /* Same bit as CA return */
+#define IODA_PESTA_UR_RETURN PPC_BIT(9)
+#define IODA_PESTA_UTL_NONFATAL PPC_BIT(10)
+#define IODA_PESTA_UTL_FATAL PPC_BIT(11)
+#define IODA_PESTA_TAG_REUSE_ERROR PPC_BIT(12)
+#define IODA_PESTA_PARITY_UE PPC_BIT(13)
+#define IODA_PESTA_UTL_CORRECTABLE PPC_BIT(14)
+#define IODA_PESTA_UTL_INTERRUPT PPC_BIT(15)
+#define IODA_PESTA_MMIO_XLATE PPC_BIT(16)
+#define IODA_PESTA_IODA_ERROR PPC_BIT(16) /* Same bit as MMIO xlate */
+#define IODA_PESTA_TVT_EXT_ERROR PPC_BIT(17)
+#define IODA_PESTA_TCE_PAGE_FAULT PPC_BIT(18)
+#define IODA_PESTA_TCE_ACCESS_FAULT PPC_BIT(19)
+#define IODA_PESTA_DMA_RESP_TIMEOUT PPC_BIT(20)
+#define IODA_PESTA_AIB_SIZE_INVALID PPC_BIT(21)
+#define IODA_PESTA_LEM_BIT_MASK PPC_BITMASK(26,31)
+#define IODA_PESTA_LEM_BIT_LSH PPC_BITLSHIFT(31)
+#define IODA_PESTA_RID_MASK PPC_BITMASK(32,47)
+#define IODA_PESTA_RID_LSH PPC_BITLSHIFT(47)
+#define IODA_PESTA_MSI_DATA_MASK PPC_BITMASK(48,63)
+#define IODA_PESTA_MSI_DATA_LSH PPC_BITLSHIFT(63)
+
+/* PESTB */
+#define IODA_PESTB_DMA_STOPPED PPC_BIT(0)
+#define IODA_PESTB_FAIL_ADDR_MASK PPC_BITMASK(3,63)
+#define IODA_PESTB_FAIL_ADDR_LSH PPC_BITLSHIFT(63)
+
+#endif /* __P7IOC_REGS_H */
diff --git a/include/p7ioc.h b/include/p7ioc.h
new file mode 100644
index 0000000..70c174e
--- /dev/null
+++ b/include/p7ioc.h
@@ -0,0 +1,431 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __P7IOC_H
+#define __P7IOC_H
+
+#include <cec.h>
+#include <pci.h>
+#include <lock.h>
+
+#include <ccan/container_of/container_of.h>
+
+/*
+ * Memory windows and BUID assignment
+ *
+ * - GX BAR assignment
+ *
+ * I don't know of any spec here, so we're going to mimic what
+ * OPAL seems to be doing:
+ *
+ * - BAR 0 : 32M, disabled. We just leave it alone.
+ * - BAR 1 : 8G, enabled. Appears to correspond to the MMIO
+ * space of the IOC itself and the PCI IO space
+ * - BAR 2: 128G,
+ * - BAR 3: 128G,
+ * - BAR 4: 128G, all 3 contiguous, forming a single 384G region
+ * and is used for M32 and M64 PHB windows.
+ *
+ * - Memory map
+ *
+ * MWIN1 = BAR1 (8G)
+ * MWIN2 = BAR2,3,4 (384G)
+ *
+ * MWIN2 is divided into 6 * 4G regions for use by M32's (*) and
+ * 6 * 32G regions for use by M64's.
+ *
+ * (*) The M32 will typically be configured to only 2G or so; however,
+ * the OS is in control of that setting, and since we have to reserve
+ * a power of two, we reserve the whole 4G.
+ *
+ * - RGC registers: MWIN1 + 0x00000000
+ * - PHBn IO space: MWIN1 + 0x01000000 + n * 0x00800000 (8M each)
+ * - PHBn M32 : MWIN2 + n * 0x1_00000000 (4G each)
+ * - PHBn M64 : MWIN2 + (n + 1) * 0x8_00000000 (32G each)
+ *
+ * - BUID map. The RGC has interrupts, and each PHB then has its own
+ * interrupts (errors etc...), 4 LSIs and 256 MSIs, so
+ * respectively 1 BUID for itself, 1 for the LSIs and 16 for the MSIs
+ *
+ * We keep all BUIDs below 0x10 reserved. They will be used for things
+ * like the PSI controller, the NX unit, etc.. in the P7 chip.
+ *
+ * RGC : 0x010
+ * PHBn LSI : 0x040 + n * 0x40 ( 1 BUID)
+ * PHBn MSI : 0x060 + n * 0x40 (0x10 BUIDs)
+ *
+ * -> For routing, each PHB gets a block of 0x40 BUIDs:
+ *
+ * from 0x40 * (n + 1) to 0x40 * (n + 2) - 1
+ */
+
+/* Some definitions resulting from the above description
+ *
+ * Note: A better approach might be to read the GX BAR content
+ * and isolate the biggest contiguous windows. From there
+ * we could divide things algorithmically and thus be
+ * less sensitive to a change in the memory map by the FSP
+ */
+#define MWIN1_SIZE 0x200000000ul /* MWIN1 is 8G */
+#define MWIN2_SIZE 0x6000000000ul /* MWIN2 is 384G */
+#define PHB_IO_OFFSET 0x01000000ul /* Offset of PHB IO space in MWIN1 */
+#define PHB_IO_SIZE 0x00800000ul
+#define PHB_M32_OFFSET 0x0ul /* Offset of PHB M32 space in MWIN2 */
+#define PHB_M32_SIZE 0x100000000ul
+#define PHB_M64_OFFSET 0x800000000ul /* Offset of PHB M64 space in MWIN2 */
+#define PHB_M64_SIZE 0x800000000ul
+#define RGC_BUID_OFFSET 0x10 /* Offset of RGC BUID */
+#define PHB_BUID_OFFSET 0x40 /* Offset of PHB BUID blocks */
+#define PHB_BUID_SIZE 0x40 /* Size of PHB BUID blocks */
+#define PHB_BUID_LSI_OFFSET 0x00 /* Offset of LSI in PHB BUID block */
+#define PHB_BUID_MSI_OFFSET 0x20 /* Offset of MSI in PHB BUID block */
+#define PHB_BUID_MSI_SIZE 0x10 /* Size of PHB MSI BUID block */
+
+#define PHBn_IO_BASE(n) (PHB_IO_OFFSET + (n) * PHB_IO_SIZE)
+#define PHBn_M32_BASE(n) (PHB_M32_OFFSET + (n) * PHB_M32_SIZE)
+#define PHBn_M64_BASE(n) (PHB_M64_OFFSET + (n) * PHB_M64_SIZE)
+#define PHBn_BUID_BASE(n) (PHB_BUID_OFFSET + (n) * PHB_BUID_SIZE)
+
+#define BUID_TO_PHB(buid) (((buid) - PHB_BUID_OFFSET) / PHB_BUID_SIZE)
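+
+/* Worked example of the BUID map above: PHBn_BUID_BASE(2) = 0xc0, so
+ * PHB2's LSI BUID is 0xc0 and its MSI BUIDs span 0xe0..0xef;
+ * conversely, BUID_TO_PHB(0xe5) = (0xe5 - 0x40) / 0x40 = 2.
+ */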
+
+/* p7ioc has 6 PHBs */
+#define P7IOC_NUM_PHBS 6
+
+/* M32 window setting at boot:
+ *
+ * To allow for DMA, we need to split the 32-bit PCI address space between
+ * MMIO and DMA. For now, we use a 2G/2G split with MMIO at the top.
+ *
+ * Note: The top 64K of the M32 space are used by MSIs. This is not
+ * visible here but needs to be conveyed to the OS one way or another
+ *
+ * Note2: The space reserved in the system address space for M32 is always
+ * 4G. That we chose to use a smaller portion of it is not relevant to
+ * the upper levels. To keep things consistent, the offset we apply to
+ * the window start is also applied on the host side.
+ */
+#define M32_PCI_START 0x80000000
+#define M32_PCI_SIZE 0x80000000
+
+/* PHB registers exist in both a hard coded space and a programmable
+ * AIB space. We program the latter to the values recommended in the
+ * documentation:
+ *
+ * 0x80000 + n * 0x10000
+ */
+#define PHBn_ASB_BASE(n) (((n) << 16))
+#define PHBn_ASB_SIZE 0x10000ul
+#define PHBn_AIB_BASE(n) (0x80000ul + ((n) << 16))
+#define PHBn_AIB_SIZE 0x10000ul
+
+/*
+ * LSI interrupts
+ *
+ * The LSI interrupt block supports 8 interrupts. 4 of them are the
+ * standard PCIe INTA..INTD. The rest are for additional functions
+ * of the PHB
+ */
+#define PHB_LSI_PCIE_INTA 0
+#define PHB_LSI_PCIE_INTB 1
+#define PHB_LSI_PCIE_INTC 2
+#define PHB_LSI_PCIE_INTD 3
+#define PHB_LSI_PCIE_HOTPLUG 4
+#define PHB_LSI_PCIE_PERFCTR 5
+#define PHB_LSI_PCIE_UNUSED 6
+#define PHB_LSI_PCIE_ERROR 7
+
+/*
+ * State structure for a PHB on P7IOC
+ */
+
+/*
+ * The PHB State structure is essentially used during PHB reset
+ * or recovery operations to indicate that the PHB cannot currently
+ * be used for normal operations.
+ *
+ * Some states involve waiting for the timebase to reach a certain
+ * value, in which case the field "delay_tgt_tb" is set and the
+ * state machine will be run from the "state_poll" callback.
+ *
+ * At IPL time, we call this repeatedly during the various sequences;
+ * under OS control, however, this will require a change in the API.
+ *
+ * Fortunately, the OPAL API for slot power & reset are not currently
+ * used by Linux, so changing them isn't going to be an issue. The idea
+ * here is that some of these APIs will return a positive integer when
+ * needing such a delay to proceed. The OS will then be required to
+ * call a new function opal_poll_phb() after that delay. That function
+ * will potentially return a new delay, or OPAL_SUCCESS when the original
+ * operation has completed successfully. If the operation has completed
+ * with an error, then opal_poll_phb() will return that error.
+ *
+ * Note: should we also consider optionally returning some indication
+ * of which operation is in progress, for OS debug/diag purposes?
+ *
+ * Any attempt at starting a new "asynchronous" operation while one is
+ * already in progress will result in an error.
+ *
+ * Internally, this is represented by the state being P7IOC_PHB_STATE_FUNCTIONAL
+ * when no operation is in progress, which it reaches at the end of the
+ * boot time initializations. Any attempt at performing a slot operation
+ * on a PHB in that state will change the state to the corresponding
+ * operation state machine. Any attempt while not in that state will
+ * return an error.
+ *
+ * Some operations allow for a certain amount of retries, this is
+ * provided for by the "retries" structure member for use by the state
+ * machine as it sees fit.
+ */
+enum p7ioc_phb_state {
+ /* First init state */
+ P7IOC_PHB_STATE_UNINITIALIZED,
+
+ /* During PHB HW inits */
+ P7IOC_PHB_STATE_INITIALIZING,
+
+ /* Set if the PHB is for some reason unusable */
+ P7IOC_PHB_STATE_BROKEN,
+
+ /* Set if the PHB is fenced due to an error */
+ P7IOC_PHB_STATE_FENCED,
+
+ /* PHB turned off by FSP (no clocks) */
+ P7IOC_PHB_STATE_OFF,
+
+ /* Slot Power up state machine */
+ P7IOC_PHB_STATE_SPUP_STABILIZE_DELAY, /* Step 3 Delay 2s */
+ P7IOC_PHB_STATE_SPUP_SLOT_STATUS, /* Step 4 waiting for status */
+
+ /* Slot Power down state machine */
+ P7IOC_PHB_STATE_SPDOWN_STABILIZE_DELAY, /* Step 2 Delay 2s */
+ P7IOC_PHB_STATE_SPDOWN_SLOT_STATUS, /* Step 3 waiting for status */
+
+ /* Fundamental reset sequence */
+ P7IOC_PHB_STATE_FRESET_DISABLE_LINK, /* Disable link training */
+ P7IOC_PHB_STATE_FRESET_ASSERT_DELAY, /* Delay on fundamental reset assert */
+ P7IOC_PHB_STATE_FRESET_DEASSERT_DELAY, /* Delay on fundamental reset deassert */
+ P7IOC_PHB_STATE_FRESET_WAIT_LINK, /* Wait for link up */
+
+ /* Hot Reset sequence */
+ P7IOC_PHB_STATE_HRESET_DISABLE_LINK, /* Disable Link training */
+ P7IOC_PHB_STATE_HRESET_ASSERT, /* Hot reset assert */
+ P7IOC_PHB_STATE_HRESET_DELAY, /* Hot reset delay */
+ P7IOC_PHB_STATE_HRESET_ENABLE_LINK, /* Enable Link training */
+ P7IOC_PHB_STATE_HRESET_WAIT_LINK, /* Wait for link training */
+
+ /* Normal PHB functional state */
+ P7IOC_PHB_STATE_FUNCTIONAL,
+};
+
+/*
+ * In order to support error detection and recovery on different
+ * types of IOCs (e.g. P5IOC, P7IOC, P8IOC), the best bet would
+ * be make the implementation to be 2 layers: OPAL layer and IOC
+ * layer. The OPAL layer just handles the general information and
+ * IOC layer should process much more detailed information, which
+ * is sensitive to itself.
+ */
+#define P7IOC_ERR_SRC_NONE 0
+#define P7IOC_ERR_SRC_EI 1
+#define P7IOC_ERR_SRC_RGC 2
+#define P7IOC_ERR_SRC_BI_UP 3
+#define P7IOC_ERR_SRC_BI_DOWN 4
+#define P7IOC_ERR_SRC_CI_P0 5
+#define P7IOC_ERR_SRC_CI_P1 6
+#define P7IOC_ERR_SRC_CI_P2 7
+#define P7IOC_ERR_SRC_CI_P3 8
+#define P7IOC_ERR_SRC_CI_P4 9
+#define P7IOC_ERR_SRC_CI_P5 10
+#define P7IOC_ERR_SRC_CI_P6 11
+#define P7IOC_ERR_SRC_CI_P7 12
+#define P7IOC_ERR_SRC_PHB0 13
+#define P7IOC_ERR_SRC_PHB1 14
+#define P7IOC_ERR_SRC_PHB2 15
+#define P7IOC_ERR_SRC_PHB3 16
+#define P7IOC_ERR_SRC_PHB4 17
+#define P7IOC_ERR_SRC_PHB5 18
+#define P7IOC_ERR_SRC_MISC 19
+#define P7IOC_ERR_SRC_I2C 20
+#define P7IOC_ERR_SRC_LAST 21
+
+#define P7IOC_ERR_CLASS_NONE 0
+#define P7IOC_ERR_CLASS_GXE 1
+#define P7IOC_ERR_CLASS_PLL 2
+#define P7IOC_ERR_CLASS_RGA 3
+#define P7IOC_ERR_CLASS_PHB 4
+#define P7IOC_ERR_CLASS_ER 5
+#define P7IOC_ERR_CLASS_INF 6
+#define P7IOC_ERR_CLASS_MAL 7
+#define P7IOC_ERR_CLASS_LAST 8
+
+/*
+ * P7IOC error descriptor. Errors from the PHBs and PEs are cached
+ * in the corresponding PHB structures, while the remaining errors
+ * (e.g. EI, CI Port0/1) are cached in the IOC directly.
+ */
+struct p7ioc_err {
+ uint32_t err_src;
+ uint32_t err_class;
+ uint32_t err_bit;
+};
+
+struct p7ioc;
+
+#define P7IOC_PHB_CFG_USE_ASB 1 /* ASB to access PCI-CFG */
+#define P7IOC_PHB_CFG_BLOCKED 2 /* PCI-CFG blocked except 0 */
+
+struct p7ioc_phb {
+ uint8_t index; /* 0..5 index inside p7ioc */
+ uint8_t gen;
+ uint32_t flags;
+ void *regs_asb;
+ void *regs; /* AIB regs */
+ struct lock lock;
+ uint32_t buid_lsi;
+ uint32_t buid_msi;
+ uint64_t io_base;
+ uint64_t m32_base;
+ uint64_t m64_base;
+ enum p7ioc_phb_state state;
+ uint64_t delay_tgt_tb;
+ uint64_t retries;
+ int64_t ecap; /* cached PCI-E cap offset */
+ int64_t aercap; /* cached AER ecap offset */
+ uint64_t lxive_cache[8];
+ uint64_t mxive_cache[256];
+ uint64_t mve_cache[256];
+ uint64_t peltm_cache[128];
+ uint64_t peltv_lo_cache[128];
+ uint64_t peltv_hi_cache[128];
+ uint64_t tve_lo_cache[128];
+ uint64_t tve_hi_cache[128];
+ uint64_t iod_cache[128];
+ uint64_t m32d_cache[128];
+ uint64_t m64b_cache[16];
+ uint64_t m64d_cache[128];
+ bool err_pending;
+ struct p7ioc_err err;
+ struct p7ioc *ioc;
+ struct phb phb;
+};
+
+static inline struct p7ioc_phb *phb_to_p7ioc_phb(struct phb *phb)
+{
+ return container_of(phb, struct p7ioc_phb, phb);
+}
+
+static inline bool p7ioc_phb_err_pending(struct p7ioc_phb *p)
+{
+ return p->err_pending;
+}
+
+static inline void p7ioc_phb_set_err_pending(struct p7ioc_phb *p, bool pending)
+{
+ if (!pending) {
+ p->err.err_src = P7IOC_ERR_SRC_NONE;
+ p->err.err_class = P7IOC_ERR_CLASS_NONE;
+ p->err.err_bit = -1;
+ }
+
+ p->err_pending = pending;
+}
+
+/*
+ * State structure for P7IOC IO HUB
+ */
+struct p7ioc {
+ /* Device node */
+ struct dt_node *dt_node;
+
+ /* MMIO regs */
+ void *regs;
+
+ /* Main MMIO window from GX for registers & PCI IO space */
+ uint64_t mmio1_win_start;
+ uint64_t mmio1_win_size;
+
+ /* Secondary MMIO window for PCI MMIO space */
+ uint64_t mmio2_win_start;
+ uint64_t mmio2_win_size;
+
+ /* BUID base for the PHB. This does include the top bits
+ * (chip, GX bus ID, etc...). This is initialized from the
+ * SPIRA. It does not contain the offset 0x10 for RGC
+ * interrupts.
+ *
+ * The OPAL-defined "interrupt-base" property will contain
+ * the RGC BUID, not this base value, since this is the real
+ * starting point of interrupts for the IOC and we don't want
+ * to cover the BUID 0..f gap which is reserved for P7 on-chip
+ * interrupt sources.
+ */
+ uint32_t buid_base;
+ uint32_t rgc_buid;
+
+ /* XIVT cache for RGC interrupts */
+ uint64_t xive_cache[16];
+ bool err_pending;
+ struct p7ioc_err err;
+
+ /* PHB array & presence detect */
+ struct p7ioc_phb phbs[P7IOC_NUM_PHBS];
+ uint8_t phb_pdt;
+
+ struct io_hub hub;
+};
+
+static inline struct p7ioc *iohub_to_p7ioc(struct io_hub *hub)
+{
+ return container_of(hub, struct p7ioc, hub);
+}
+
+static inline bool p7ioc_err_pending(struct p7ioc *ioc)
+{
+ return ioc->err_pending;
+}
+
+static inline void p7ioc_set_err_pending(struct p7ioc *ioc, bool pending)
+{
+ if (!pending) {
+ ioc->err.err_src = P7IOC_ERR_SRC_NONE;
+ ioc->err.err_class = P7IOC_ERR_CLASS_NONE;
+ ioc->err.err_bit = -1;
+ }
+
+ ioc->err_pending = pending;
+}
+
+static inline bool p7ioc_phb_enabled(struct p7ioc *ioc, unsigned int phb)
+{
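+ /* phb_pdt is MSB-first: bit 0x80 corresponds to PHB 0 */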
+ return !!(ioc->phb_pdt & (0x80 >> phb));
+}
+
+extern int64_t p7ioc_inits(struct p7ioc *ioc);
+
+extern void p7ioc_phb_setup(struct p7ioc *ioc, uint8_t index);
+extern int64_t p7ioc_phb_init(struct p7ioc_phb *p);
+
+extern bool p7ioc_check_LEM(struct p7ioc *ioc, uint16_t *pci_error_type,
+ uint16_t *severity);
+extern int64_t p7ioc_phb_get_xive(struct p7ioc_phb *p, uint32_t isn,
+ uint16_t *server, uint8_t *prio);
+extern int64_t p7ioc_phb_set_xive(struct p7ioc_phb *p, uint32_t isn,
+ uint16_t server, uint8_t prio);
+extern void p7ioc_reset(struct io_hub *hub);
+extern void p7ioc_phb_reset(struct phb *phb);
+
+#endif /* __P7IOC_H */
diff --git a/include/pci-cfg.h b/include/pci-cfg.h
new file mode 100644
index 0000000..7c98f3f
--- /dev/null
+++ b/include/pci-cfg.h
@@ -0,0 +1,524 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * PCI Configuration space definitions
+ */
+#ifndef __PCI_CFG_H
+#define __PCI_CFG_H
+
+/* Common cfg space header */
+#define PCI_CFG_VENDOR_ID 0x0000
+#define PCI_CFG_DEVICE_ID 0x0002
+#define PCI_CFG_CMD 0x0004
+#define PCI_CFG_CMD_IO_EN 0x0001
+#define PCI_CFG_CMD_MEM_EN 0x0002
+#define PCI_CFG_CMD_BUS_MASTER_EN 0x0004
+#define PCI_CFG_CMD_PERR_RESP 0x0040
+#define PCI_CFG_CMD_SERR_EN 0x0100
+#define PCI_CFG_CMD_INTx_DIS 0x0400
+#define PCI_CFG_STAT 0x0006
+#define PCI_CFG_STAT_INTx 0x0008
+#define PCI_CFG_STAT_CAP 0x0010
+#define PCI_CFG_STAT_MDATAPERR 0x0100
+#define PCI_CFG_STAT_SENT_TABORT 0x0800
+#define PCI_CFG_STAT_RECV_TABORT 0x1000
+#define PCI_CFG_STAT_RECV_MABORT 0x2000
+#define PCI_CFG_STAT_SENT_SERR 0x4000
+#define PCI_CFG_STAT_RECV_PERR 0x8000
+#define PCI_CFG_REV_ID 0x0008
+#define PCI_CFG_CLAS_CODE 0x0009
+#define PCI_CFG_CACHE_LINE_SIZE 0x000c
+#define PCI_CFG_LAT_TIMER 0x000d
+#define PCI_CFG_HDR_TYPE 0x000e
+#define PCI_CFG_BIST 0x000f
+#define PCI_CFG_BAR0 0x0010
+#define PCI_CFG_BAR_TYPE_MASK 0x00000001
+#define PCI_CFG_BAR_TYPE_MEM 0x00000000
+#define PCI_CFG_BAR_TYPE_IO 0x00000001
+#define PCI_CFG_BAR_MEM64 0x00000004
+#define PCI_CFG_BAR_MEM_PREFETCH 0x00000008
+#define PCI_CFG_BAR1 0x0014
+
+/* Type 0 fields */
+#define PCI_CFG_BAR2 0x0018
+#define PCI_CFG_BAR3 0x001c
+#define PCI_CFG_BAR4 0x0020
+#define PCI_CFG_BAR5 0x0024
+#define PCI_CFG_CARDBUS_CIS 0x0028
+#define PCI_CFG_SUBSYS_VENDOR_ID 0x002c
+#define PCI_CFG_SUBSYS_ID 0x002e
+#define PCI_CFG_ROMBAR 0x0030
+#define PCI_CFG_CAP 0x0034
+#define PCI_CFG_INT_LINE 0x003c
+#define PCI_CFG_INT_PIN 0x003d
+#define PCI_CFG_INT_MIN_GNT 0x003e
+#define PCI_CFG_INT_MAX_LAT 0x003f
+
+/* Type 1 fields */
+#define PCI_CFG_PRIMARY_BUS 0x0018
+#define PCI_CFG_SECONDARY_BUS 0x0019
+#define PCI_CFG_SUBORDINATE_BUS 0x001a
+#define PCI_CFG_SEC_LAT_TIMER 0x001b
+#define PCI_CFG_IO_BASE 0x001c
+#define PCI_CFG_IO_LIMIT 0x001d
+#define PCI_CFG_SECONDARY_STATUS 0x001e
+#define PCI_CFG_MEM_BASE 0x0020
+#define PCI_CFG_MEM_LIMIT 0x0022
+#define PCI_CFG_PREF_MEM_BASE 0x0024
+#define PCI_CFG_PREF_MEM_LIMIT 0x0026
+#define PCI_CFG_PREF_MEM_BASE_U32 0x0028
+#define PCI_CFG_PREF_MEM_LIMIT_U32 0x002c
+#define PCI_CFG_IO_BASE_U16 0x0030
+#define PCI_CFG_IO_LIMIT_U16 0x0032
+#define PCI_CFG_BR_CAP 0x0034 /* Same as type 0 */
+#define PCI_CFG_BR_ROMBAR 0x0038 /* Different from type 0 */
+#define PCI_CFG_BR_INT_LINE 0x003c /* Same as type 0 */
+#define PCI_CFG_BR_INT_PIN 0x003d /* Same as type 0 */
+#define PCI_CFG_BRCTL 0x003e
+#define PCI_CFG_BRCTL_PERR_RESP_EN 0x0001
+#define PCI_CFG_BRCTL_SERR_EN 0x0002
+#define PCI_CFG_BRCTL_ISA_EN 0x0004
+#define PCI_CFG_BRCTL_VGA_EN 0x0008
+#define PCI_CFG_BRCTL_VGA_16BIT 0x0010
+#define PCI_CFG_BRCTL_MABORT_REPORT 0x0020
+#define PCI_CFG_BRCTL_SECONDARY_RESET 0x0040
+#define PCI_CFG_BRCTL_FAST_BACK2BACK 0x0080
+#define PCI_CFG_BRCTL_PRI_DISC_TIMER 0x0100
+#define PCI_CFG_BRCTL_SEC_DISC_TIMER 0x0200
+#define PCI_CFG_BRCTL_DISC_TIMER_STAT 0x0400
+#define PCI_CFG_BRCTL_DISC_TIMER_SERR 0x0800
+
+/*
+ * Standard capabilities
+ */
+#define PCI_CFG_CAP_ID 0
+#define PCI_CFG_CAP_NEXT 1
+
+/* PCI bridge subsystem ID capability */
+#define PCI_CFG_CAP_ID_SUBSYS_VID 0x0d
+#define PCICAP_SUBSYS_VID_VENDOR 4
+#define PCICAP_SUBSYS_VID_DEVICE 6
+
+/* PCI Express capability */
+#define PCI_CFG_CAP_ID_EXP 0x10
+/* PCI Express capability fields */
+#define PCICAP_EXP_CAPABILITY_REG 0x02
+#define PCICAP_EXP_CAP_VERSION_MASK 0x000f
+#define PCICAP_EXP_CAP_VERSION_LSH 0
+#define PCICAP_EXP_CAP_TYPE_MASK 0x00f0
+#define PCICAP_EXP_CAP_TYPE_LSH 4
+#define PCIE_TYPE_ENDPOINT 0x0
+#define PCIE_TYPE_LEGACY 0x1
+#define PCIE_TYPE_ROOT_PORT 0x4
+#define PCIE_TYPE_SWITCH_UPPORT 0x5
+#define PCIE_TYPE_SWITCH_DNPORT 0x6
+#define PCIE_TYPE_PCIE_TO_PCIX 0x7
+#define PCIE_TYPE_PCIX_TO_PCIE 0x8
+#define PCIE_TYPE_RC_INTEGRATED 0x9
+#define PCIE_TYPE_RC_EVT_COLL 0xa
+#define PCICAP_EXP_CAP_SLOT 0x0100
+#define PCICAP_EXP_CAP_MSI_NUM_MASK 0x3e00
+#define PCICAP_EXP_CAP_MSI_NUM_LSH 9
+#define PCICAP_EXP_CAP_TCS_ROUTING 0x4000
+#define PCICAP_EXP_DEVCAP 0x04
+#define PCICAP_EXP_DEVCAP_MPSS_MASK 0x00000007
+#define PCICAP_EXP_DEVCAP_MPSS_LSH 0
+#define PCIE_MPSS_128 0
+#define PCIE_MPSS_256 1
+#define PCIE_MPSS_512 2
+#define PCIE_MPSS_1024 3
+#define PCIE_MPSS_2048 4
+#define PCIE_MPSS_4096 5
+#define PCICAP_EXP_DEVCAP_PHANT_MASK 0x00000018
+#define PCICAP_EXP_DEVCAP_PHANT_LSH 3
+#define PCIE_PHANTOM_NONE 0
+#define PCIE_PHANTOM_1MSB 1
+#define PCIE_PHANTOM_2MSB 2
+#define PCIE_PHANTOM_3MSB 3
+#define PCICAP_EXP_DEVCAP_EXTTAG 0x00000020
+#define PCICAP_EXP_DEVCAP_L0SL_MASK 0x000001c0
+#define PCICAP_EXP_DEVCAP_L0SL_LSH 6
+#define PCIE_L0SL_MAX_64NS 0
+#define PCIE_L0SL_MAX_128NS 1
+#define PCIE_L0SL_MAX_256NS 2
+#define PCIE_L0SL_MAX_512NS 3
+#define PCIE_L0SL_MAX_1US 4
+#define PCIE_L0SL_MAX_2US 5
+#define PCIE_L0SL_MAX_4US 6
+#define PCIE_L0SL_MAX_NO_LIMIT 7
+#define PCICAP_EXP_DEVCAP_L1L_MASK 0x00000e00
+#define PCICAP_EXP_DEVCAP_L1L_LSH 9
+#define PCIE_L1L_MAX_1US 0
+#define PCIE_L1L_MAX_2US 1
+#define PCIE_L1L_MAX_4US 2
+#define PCIE_L1L_MAX_8US 3
+#define PCIE_L1L_MAX_16US 4
+#define PCIE_L1L_MAX_32US 5
+#define PCIE_L1L_MAX_64US 6
+#define PCIE_L1L_MAX_NO_LIMIT 7
+#define PCICAP_EXP_ROLE_BASED_ERR 0x00008000
+#define PCICAP_EXP_DEVCAP_PWRVAL_MASK 0x03fc0000
+#define PCICAP_EXP_DEVCAP_PWRVAL_LSH 18
+#define PCICAP_EXP_DEVCAP_PWRSCA_MASK 0x0c000000
+#define PCICAP_EXP_DEVCAP_PWRSCA_LSH 26
+#define PCIE_SLOT_PWR_SCALE_1x 0
+#define PCIE_SLOT_PWR_SCALE_0d1x 1
+#define PCIE_SLOT_PWR_SCALE_0d01x 2
+#define PCIE_SLOT_PWR_SCALE_0d001x 3
+#define PCICAP_EXP_DEVCAP_FUNC_RESET 0x10000000
+#define PCICAP_EXP_DEVCTL 0x08
+#define PCICAP_EXP_DEVCTL_CE_REPORT 0x0001
+#define PCICAP_EXP_DEVCTL_NFE_REPORT 0x0002
+#define PCICAP_EXP_DEVCTL_FE_REPORT 0x0004
+#define PCICAP_EXP_DEVCTL_UR_REPORT 0x0008
+#define PCICAP_EXP_DEVCTL_RELAX_ORD 0x0010
+#define PCICAP_EXP_DEVCTL_MPS_MASK 0x00e0
+#define PCICAP_EXP_DEVCTL_MPS_LSH 5
+#define PCIE_MPS_128B 0
+#define PCIE_MPS_256B 1
+#define PCIE_MPS_512B 2
+#define PCIE_MPS_1024B 3
+#define PCIE_MPS_2048B 4
+#define PCIE_MPS_4096B 5
+#define PCICAP_EXP_DEVCTL_EXT_TAG 0x0100
+#define PCICAP_EXP_DEVCTL_PHANTOM 0x0200
+#define PCICAP_EXP_DEVCTL_AUX_POW_PM 0x0400
+#define PCICAP_EXP_DEVCTL_NO_SNOOP 0x0800
+#define PCICAP_EXP_DEVCTL_MRRS_MASK 0x7000
+#define PCICAP_EXP_DEVCTL_MRRS_LSH 12
+#define PCIE_MRSS_128B 0
+#define PCIE_MRSS_256B 1
+#define PCIE_MRSS_512B 2
+#define PCIE_MRSS_1024B 3
+#define PCIE_MRSS_2048B 4
+#define PCIE_MRSS_4096B 5
+#define PCICAP_EXP_DEVCTL_PCIX_RETRY 0x8000 /* PCIe - PCIX bridges only */
+#define PCICAP_EXP_DEVCTL_FUNC_RESET 0x8000 /* all others */
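+
+/*
+ * Illustrative sketch, assuming the token-pasting SETFIELD() helper:
+ * programming a 256-byte MPS and a 512-byte max read request size
+ * into a DEVCTL value read from config space:
+ *
+ *    devctl = SETFIELD(PCICAP_EXP_DEVCTL_MPS, devctl, PCIE_MPS_256B);
+ *    devctl = SETFIELD(PCICAP_EXP_DEVCTL_MRRS, devctl, PCIE_MRSS_512B);
+ */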
+#define PCICAP_EXP_DEVSTAT 0x0a
+#define PCICAP_EXP_DEVSTAT_CE 0x0001
+#define PCICAP_EXP_DEVSTAT_NFE 0x0002
+#define PCICAP_EXP_DEVSTAT_FE 0x0004
+#define PCICAP_EXP_DEVSTAT_UE 0x0008
+#define PCICAP_EXP_DEVSTAT_AUX_POW 0x0010
+#define PCICAP_EXP_DEVSTAT_TPEND 0x0020
+#define PCICAP_EXP_LCAP 0x0c
+#define PCICAP_EXP_LCAP_MAXSPD_MASK 0x0000000f
+#define PCICAP_EXP_LCAP_MAXSPD_LSH 0
+#define PCIE_LSPEED_VECBIT_0 0x1
+#define PCIE_LSPEED_VECBIT_1 0x2
+#define PCIE_LSPEED_VECBIT_2 0x3
+#define PCIE_LSPEED_VECBIT_3 0x4
+#define PCIE_LSPEED_VECBIT_4 0x5
+#define PCIE_LSPEED_VECBIT_5 0x6
+#define PCIE_LSPEED_VECBIT_6 0x7
+#define PCICAP_EXP_LCAP_MAXWDTH_MASK 0x000003f0
+#define PCICAP_EXP_LCAP_MAXWDTH_LSH 4
+#define PCIE_LWIDTH_1X 1
+#define PCIE_LWIDTH_2X 2
+#define PCIE_LWIDTH_4X 4
+#define PCIE_LWIDTH_8X 8
+#define PCIE_LWIDTH_12X 12
+#define PCIE_LWIDTH_16X 16
+#define PCIE_LWIDTH_32X 32
+#define PCICAP_EXP_LCAP_ASPM_L0S 0x00000400
+#define PCICAP_EXP_LCAP_ASPM_L1 0x00000800
+#define PCICAP_EXP_LCAP_L0S_EXLT_MASK 0x00007000
+#define PCICAP_EXP_LCAP_L0S_EXLT_LSH 12
+#define PCIE_L0S_EXLT_LESS_64NS 0
+#define PCIE_L0S_EXLT_64NS_128NS 1
+#define PCIE_L0S_EXLT_128NS_256NS 2
+#define PCIE_L0S_EXLT_256NS_512NS 3
+#define PCIE_L0S_EXLT_512NS_1US 4
+#define PCIE_L0S_EXLT_1US_2US 5
+#define PCIE_L0S_EXLT_2US_4US 6
+#define PCIE_L0S_EXLT_MORE_4US 7
+#define PCICAP_EXP_LCAP_L1_EXLT_MASK 0x00038000
+#define PCICAP_EXP_LCAP_L1_EXLT_LSH 15
+#define PCIE_L1_EXLT_LESS_1US 0
+#define PCIE_L1_EXLT_1US_2US 1
+#define PCIE_L1_EXLT_2US_4US 2
+#define PCIE_L1_EXLT_4US_8US 3
+#define PCIE_L1_EXLT_8US_16US 4
+#define PCIE_L1_EXLT_16US_32US 5
+#define PCIE_L1_EXLT_32US_64US 6
+#define PCIE_L1_EXLT_MORE_64US 7
+#define PCICAP_EXP_LCAP_CLK_PM 0x00040000
+#define PCICAP_EXP_LCAP_SURP_DWN_ERR 0x00080000
+#define PCICAP_EXP_LCAP_DL_ACT_REP 0x00100000
+#define PCICAP_EXP_LCAP_LNKBWDTH_NOTF 0x00200000
+#define PCICAP_EXP_LCAP_ASPM_OPT_CMPL 0x00400000
+#define PCICAP_EXP_LCAP_PORTNUM_MASK 0xff000000
+#define PCICAP_EXP_LCAP_PORTNUM_LSH 24
+#define PCICAP_EXP_LCTL 0x10
+#define PCICAP_EXP_LCTL_ASPM_L0S 0x0001
+#define PCICAP_EXP_LCTL_ASPM_L1 0x0002
+#define PCICAP_EXP_LCTL_RCB 0x0008 /* RO on root ports */
+#define PCICAP_EXP_LCTL_LINK_DIS 0x0010
+#define PCICAP_EXP_LCTL_LINK_RETRAIN 0x0020
+#define PCICAP_EXP_LCTL_COMMON_CLK 0x0040
+#define PCICAP_EXP_LCTL_EXT_SYNCH 0x0080
+#define PCICAP_EXP_LCTL_CLOCK_PM 0x0100
+#define PCICAP_EXP_LCTL_HW_AWIDTH_DIS 0x0200
+#define PCICAP_EXP_LCTL_LBWM_INT_EN 0x0400
+#define PCICAP_EXP_LCTL_LAWD_INT_EN 0x0800
+#define PCICAP_EXP_LSTAT 0x12
+#define PCICAP_EXP_LSTAT_SPEED_MASK 0x000f
+#define PCICAP_EXP_LSTAT_SPEED_LSH 0 /* use PCIE_LSPEED_* consts */
+#define PCICAP_EXP_LSTAT_WIDTH_MASK 0x03f0
+#define PCICAP_EXP_LSTAT_WIDTH_LSH 4 /* use PCIE_LWIDTH_* consts */
+#define PCICAP_EXP_LSTAT_TRAINING 0x0800
+#define PCICAP_EXP_LSTAT_SLOTCLKCFG 0x1000
+#define PCICAP_EXP_LSTAT_DLLL_ACT 0x2000
+#define PCICAP_EXP_LSTAT_LBWM_STAT 0x4000
+#define PCICAP_EXP_LSTAT_LAWS_STAT 0x8000
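+
+/*
+ * Illustrative sketch (not an API of this file): decoding the
+ * negotiated link speed and width from a Link Status value "lstat",
+ * assumed to hold the 16-bit register read at the PCI Express
+ * capability offset plus PCICAP_EXP_LSTAT:
+ *
+ *	speed = (lstat & PCICAP_EXP_LSTAT_SPEED_MASK)
+ *		>> PCICAP_EXP_LSTAT_SPEED_LSH;	(yields PCIE_LSPEED_* values)
+ *	width = (lstat & PCICAP_EXP_LSTAT_WIDTH_MASK)
+ *		>> PCICAP_EXP_LSTAT_WIDTH_LSH;	(yields PCIE_LWIDTH_* values)
+ */
+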
+#define PCICAP_EXP_SLOTCAP 0x14
+#define PCICAP_EXP_SLOTCAP_ATTNB 0x00000001
+#define PCICAP_EXP_SLOTCAP_PWCTRL 0x00000002
+#define PCICAP_EXP_SLOTCAP_MRLSENS 0x00000004
+#define PCICAP_EXP_SLOTCAP_ATTNI 0x00000008
+#define PCICAP_EXP_SLOTCAP_PWRI 0x00000010
+#define PCICAP_EXP_SLOTCAP_HPLUG_SURP 0x00000020
+#define PCICAP_EXP_SLOTCAP_HPLUG_CAP 0x00000040
+#define PCICAP_EXP_SLOTCAP_SPLVA_MASK 0x00007f80
+#define PCICAP_EXP_SLOTCAP_SPLVA_LSH 7
+#define PCICAP_EXP_SLOTCAP_SPLSC_MASK 0x00018000
+#define PCICAP_EXP_SLOTCAP_SPLSC_LSH 15
+#define PCICAP_EXP_SLOTCAP_EIP 0x00020000
+#define PCICAP_EXP_SLOTCAP_NO_CMDCOMP 0x00040000
+#define PCICAP_EXP_SLOTCAP_PSLOT_MASK 0xfff80000
+#define PCICAP_EXP_SLOTCAP_PSLOT_LSH 19
+#define PCICAP_EXP_SLOTCTL 0x18
+#define PCICAP_EXP_SLOTCTL_ATTNB 0x0001
+#define PCICAP_EXP_SLOTCTL_PFLT 0x0002
+#define PCICAP_EXP_SLOTCTL_MRLSENSE 0x0004
+#define PCICAP_EXP_SLOTCTL_PDETECT 0x0008
+#define PCICAP_EXP_SLOTCTL_CMDCOMPINT 0x0010
+#define PCICAP_EXP_SLOTCTL_HPINT 0x0020
+#define PCICAP_EXP_SLOTCTL_ATTNI_MASK 0x00c0
+#define PCICAP_EXP_SLOTCTL_ATTNI_LSH 6
+#define PCIE_INDIC_ON 1
+#define PCIE_INDIC_BLINK 2
+#define PCIE_INDIC_OFF 3
+#define PCICAP_EXP_SLOTCTL_PWRI_MASK 0x0300
+#define PCICAP_EXP_SLOTCTL_PWRI_LSH 8 /* Use PCIE_INDIC_* consts */
+#define PCICAP_EXP_SLOTCTL_PWRCTLR 0x0400
+#define PCICAP_EXP_SLOTCTL_EIC 0x0800
+#define PCICAP_EXP_SLOTCTL_DLLSTCHG 0x1000
+#define PCICAP_EXP_SLOTSTAT 0x1a
+#define PCICAP_EXP_SLOTSTAT_ATTNBCH 0x0001
+#define PCICAP_EXP_SLOTSTAT_PWRFLTCH 0x0002
+#define PCICAP_EXP_SLOTSTAT_MRLSENSCH 0x0004
+#define PCICAP_EXP_SLOTSTAT_PDETECTCH 0x0008
+#define PCICAP_EXP_SLOTSTAT_CMDCOMPCH 0x0010
+#define PCICAP_EXP_SLOTSTAT_MRLSENSST 0x0020
+#define PCICAP_EXP_SLOTSTAT_PDETECTST 0x0040
+#define PCICAP_EXP_SLOTSTAT_EIS 0x0080
+#define PCICAP_EXP_SLOTSTAT_DLLSTCH 0x0100
+#define PCICAP_EXP_RC 0x1c
+#define PCICAP_EXP_RC_SYSERR_ON_CE 0x0001
+#define PCICAP_EXP_RC_SYSERR_ON_NFE 0x0002
+#define PCICAP_EXP_RC_SYSERR_ON_FE 0x0004
+#define PCICAP_EXP_RC_PME_INT_EN 0x0008
+#define PCICAP_EXP_RC_CRS_VISIBLE 0x0010
+#define PCICAP_EXP_RCAP 0x1e
+#define PCICAP_EXP_RCAP_CRS_VISIBLE 0x0001
+#define PCICAP_EXP_RSTAT 0x20
+#define PCICAP_EXP_RSTAT_PME_RID_MASK 0x0000ffff
+#define PCICAP_EXP_RSTAT_PME_RID_LSH 0
+#define PCICAP_EXP_RSTAT_PME_STATUS 0x00010000
+#define PCICAP_EXP_RSTAT_PME_PENDING 0x00020000
+#define PCICAP_EXP_DCAP2 0x24
+#define PCICAP_EXP_DCAP2_CMPTOUT_MASK 0x0000000f
+#define PCICAP_EXP_DCAP2_CMPTOUT_LSH 0
+#define PCICAP_EXP_DCAP2_CMPTOUT_DIS 0x00000010
+#define PCICAP_EXP_DCAP2_ARI_FWD 0x00000020
+#define PCICAP_EXP_DCAP2_ATOMIC_RTE 0x00000040
+#define PCICAP_EXP_DCAP2_ATOMIC32 0x00000080
+#define PCICAP_EXP_DCAP2_ATOMIC64 0x00000100
+#define PCICAP_EXP_DCAP2_CAS128 0x00000200
+#define PCICAP_EXP_DCAP2_NORO_PRPR 0x00000400
+#define PCICAP_EXP_DCAP2_LTR 0x00000800
+#define PCICAP_EXP_DCAP2_TPHCOMP 0x00001000
+#define PCICAP_EXP_DCAP2_TPHCOMP_EXT 0x00002000
+#define PCICAP_EXP_DCAP2_OBFF_MSG 0x00040000
+#define PCICAP_EXP_DCAP2_OBFF_WAKE 0x00080000
+#define PCICAP_EXP_DCAP2_EXTFMT 0x00100000
+#define PCICAP_EXP_DCAP2_EETLP_PFX 0x00200000
+#define PCICAP_EXP_DCAP2_MAXEETP_MASK 0x00c00000
+#define PCICAP_EXP_DCAP2_MAXEETP_LSH 22
+#define PCIE_EETLPP_1 1
+#define PCIE_EETLPP_2 2
+#define PCIE_EETLPP_3 3
+#define PCIE_EETLPP_4 0
+#define PCICAP_EXP_DCTL2 0x28
+#define PCICAP_EXP_DCTL2_CMPTOUT_MASK 0x000f
+#define PCICAP_EXP_DCTL2_CMPTOUT_LSH 0
+#define PCICAP_EXP_DCTL2_CMPTOUT_DIS 0x0010
+#define PCICAP_EXP_DCTL2_ARI_FWD 0x0020
+#define PCICAP_EXP_DCTL2_ATOMIC_REQ 0x0040
+#define PCICAP_EXP_DCTL2_ATOMIC_EGBLK 0x0080
+#define PCICAP_EXP_DCTL2_IDO_REQ 0x0100
+#define PCICAP_EXP_DCTL2_IDO_COMPL 0x0200
+#define PCICAP_EXP_DCTL2_LTR 0x0400
+#define PCICAP_EXP_DCTL2_OBFF_MASK 0x6000
+#define PCICAP_EXP_DCTL2_OBFF_LSH 13
+#define PCIE_OBFF_MODE_DISABLED 0
+#define PCIE_OBFF_MODE_MSG_A 1
+#define PCIE_OBFF_MODE_MSG_B 2
+#define PCIE_OBFF_MODE_WAKE 3
+#define PCICAP_EXP_DCTL2_EETLPP_BLK 0x8000
+#define PCICAP_EXP_DSTA2 0x2a
+#define PCICAP_EXP_LCAP2 0x2c
+#define PCICAP_EXP_LCAP2_SP_2d5GTs 0x00000002
+#define PCICAP_EXP_LCAP2_SP_5d0GTs 0x00000004
+#define PCICAP_EXP_LCAP2_SP_8d0GTs 0x00000008
+#define PCICAP_EXP_LCAP2_XLINK 0x00000100
+#define PCICAP_EXP_LCTL2 0x30
+#define PCICAP_EXP_LCTL2_TLSPD_MASK 0x000f
+#define PCICAP_EXP_LCTL2_TLSPD_LSH 0 /* use PCIE_LSPEED_ consts */
+#define PCICAP_EXP_LCTL2_ENTER_COMPL 0x0010
+#define PCICAP_EXP_LCTL2_HWAUTSPDIS 0x0020
+#define PCICAP_EXP_LCTL2_SEL_DEEMPH 0x0040
+#define PCICAP_EXP_LCTL2_XMTMARG_MASK 0x0380
+#define PCICAP_EXP_LCTL2_XMTMARG_LSH 7
+#define PCICAP_EXP_LCTL2_ENTER_MCOMPL 0x0400
+#define PCICAP_EXP_LCTL2_COMPL_SOS 0x0800
+#define PCICAP_EXP_LCTL2_CMPPDEM_MASK 0xf000
+#define PCICAP_EXP_LCTL2_CMPPDEM_LSH 12
+#define PCICAP_EXP_LSTA2 0x32
+#define PCICAP_EXP_LSTA2_DEMPH_LVL 0x0001
+#define PCICAP_EXP_LSTA2_EQ_COMPLETE 0x0002
+#define PCICAP_EXP_LSTA2_EQ_PH1_OK 0x0004
+#define PCICAP_EXP_LSTA2_EQ_PH2_OK 0x0008
+#define PCICAP_EXP_LSTA2_EQ_PH3_OK 0x0010
+#define PCICAP_EXP_LSTA2_LINK_EQ_REQ 0x0020
+#define PCICAP_EXP_SCAP2 0x34
+#define PCICAP_EXP_SCTL2 0x38
+#define PCICAP_EXP_SSTA2 0x3a
+
+/*
+ * PCI-E Extended capabilities
+ */
+#define PCI_CFG_ECAP_START 0x100
+#define PCI_CFG_ECAP_ID_MASK 0x0000ffff
+#define PCI_CFG_ECAP_ID_LSH 0
+#define PCI_CFG_ECAP_VERS_MASK 0x000f0000
+#define PCI_CFG_ECAP_VERS_LSH 16
+#define PCI_CFG_ECAP_NEXT_MASK 0xfff00000
+#define PCI_CFG_ECAP_NEXT_LSH 20
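+
+/*
+ * Illustrative sketch: splitting an extended capability header dword
+ * "hdr" (read at PCI_CFG_ECAP_START, then at each "next" offset) into
+ * its fields while walking the extended capability list:
+ *
+ *	id   = (hdr & PCI_CFG_ECAP_ID_MASK) >> PCI_CFG_ECAP_ID_LSH;
+ *	vers = (hdr & PCI_CFG_ECAP_VERS_MASK) >> PCI_CFG_ECAP_VERS_LSH;
+ *	next = (hdr & PCI_CFG_ECAP_NEXT_MASK) >> PCI_CFG_ECAP_NEXT_LSH;
+ *
+ * A "next" value of 0 terminates the walk.
+ */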
+
+/* AER Ext. Capability */
+#define PCIECAP_ID_AER 0x0001
+#define PCIECAP_AER_UE_STATUS 0x04
+#define PCIECAP_AER_UE_DLP 0x00000010
+#define PCIECAP_AER_UE_SURPRISE_DOWN 0x00000020
+#define PCIECAP_AER_UE_POISON_TLP 0x00001000
+#define PCIECAP_AER_UE_FLOW_CTL_PROT 0x00002000
+#define PCIECAP_AER_UE_COMPL_TIMEOUT 0x00004000
+#define PCIECAP_AER_UE_COMPL_ABORT 0x00008000
+#define PCIECAP_AER_UE_UNEXP_COMPL 0x00010000
+#define PCIECAP_AER_UE_RECV_OVFLOW 0x00020000
+#define PCIECAP_AER_UE_MALFORMED_TLP 0x00040000
+#define PCIECAP_AER_UE_ECRC 0x00080000
+#define PCIECAP_AER_UE_UNSUPP_REQ 0x00100000
+#define PCIECAP_AER_UE_ACS_VIOLATION 0x00200000
+#define PCIECAP_AER_UE_INTERNAL 0x00400000
+#define PCIECAP_AER_UE_MC_BLKD_TLP 0x00800000
+#define PCIECAP_AER_UE_ATOMIC_EGBLK 0x01000000
+#define PCIECAP_AER_UE_TLP_PRFX_BLK 0x02000000
+#define PCIECAP_AER_UE_MASK 0x08
+#define PCIECAP_AER_UE_MASK_DLLP 0x00000010
+#define PCIECAP_AER_UE_MASK_SURPRISE_DOWN 0x00000020
+#define PCIECAP_AER_UE_MASK_POISON_TLP 0x00001000
+#define PCIECAP_AER_UE_MASK_FLOW_CTL_PROT 0x00002000
+#define PCIECAP_AER_UE_MASK_COMPL_TIMEOUT 0x00004000
+#define PCIECAP_AER_UE_MASK_COMPL_ABORT 0x00008000
+#define PCIECAP_AER_UE_MASK_UNEXP_COMPL 0x00010000
+#define PCIECAP_AER_UE_MASK_RECV_OVFLOW 0x00020000
+#define PCIECAP_AER_UE_MASK_MALFORMED_TLP 0x00040000
+#define PCIECAP_AER_UE_MASK_ECRC 0x00080000
+#define PCIECAP_AER_UE_MASK_UNSUPP_REQ 0x00100000
+#define PCIECAP_AER_UE_SEVERITY 0x0c
+#define PCIECAP_AER_UE_SEVERITY_DLLP 0x00000010
+#define PCIECAP_AER_UE_SEVERITY_SURPRISE_DOWN 0x00000020
+#define PCIECAP_AER_UE_SEVERITY_POISON_TLP 0x00001000
+#define PCIECAP_AER_UE_SEVERITY_FLOW_CTL_PROT 0x00002000
+#define PCIECAP_AER_UE_SEVERITY_COMPL_TIMEOUT 0x00004000
+#define PCIECAP_AER_UE_SEVERITY_COMPL_ABORT 0x00008000
+#define PCIECAP_AER_UE_SEVERITY_UNEXP_COMPL 0x00010000
+#define PCIECAP_AER_UE_SEVERITY_RECV_OVFLOW 0x00020000
+#define PCIECAP_AER_UE_SEVERITY_MALFORMED_TLP 0x00040000
+#define PCIECAP_AER_UE_SEVERITY_ECRC 0x00080000
+#define PCIECAP_AER_UE_SEVERITY_UNSUPP_REQ 0x00100000
+#define PCIECAP_AER_UE_SEVERITY_INTERNAL 0x00400000
+#define PCIECAP_AER_CE_STATUS 0x10
+#define PCIECAP_AER_CE_RECVR_ERR 0x00000001
+#define PCIECAP_AER_CE_BAD_TLP 0x00000040
+#define PCIECAP_AER_CE_BAD_DLLP 0x00000080
+#define PCIECAP_AER_CE_REPLAY_ROLLVR 0x00000100
+#define PCIECAP_AER_CE_REPLAY_TMR_TO 0x00001000
+#define PCIECAP_AER_CE_ADV_NONFATAL 0x00002000
+#define PCIECAP_AER_CE_CORTD_INTERNAL 0x00004000
+#define PCIECAP_AER_CE_HDR_LOG_OVFL 0x00008000
+#define PCIECAP_AER_CE_MASK 0x14
+#define PCIECAP_AER_CE_MASK_RECVR_ERR 0x00000001
+#define PCIECAP_AER_CE_MASK_BAD_TLP 0x00000040
+#define PCIECAP_AER_CE_MASK_BAD_DLLP 0x00000080
+#define PCIECAP_AER_CE_MASK_REPLAY_ROLLVR 0x00000100
+#define PCIECAP_AER_CE_MASK_REPLAY_TMR_TO 0x00001000
+#define PCIECAP_AER_CE_MASK_ADV_NONFATAL 0x00002000
+#define PCIECAP_AER_CE_MASK_CORTD_INTERNAL 0x00004000
+#define PCIECAP_AER_CE_MASK_HDR_LOG_OVFL 0x00008000
+#define PCIECAP_AER_CAPCTL 0x18
+#define PCIECAP_AER_CAPCTL_FPTR_MASK 0x0000001f
+#define PCIECAP_AER_CAPCTL_FPTR_LSH 0
+#define PCIECAP_AER_CAPCTL_ECRCG_CAP 0x00000020
+#define PCIECAP_AER_CAPCTL_ECRCG_EN 0x00000040
+#define PCIECAP_AER_CAPCTL_ECRCC_CAP 0x00000080
+#define PCIECAP_AER_CAPCTL_ECRCC_EN 0x00000100
+#define PCIECAP_AER_CAPCTL_MHREC_CAP 0x00000200
+#define PCIECAP_AER_CAPCTL_MHREC_EN 0x00000400
+#define PCIECAP_AER_CAPCTL_TLPPL_PR 0x00000800
+#define PCIECAP_AER_HDR_LOG0 0x1c
+#define PCIECAP_AER_HDR_LOG1 0x20
+#define PCIECAP_AER_HDR_LOG2 0x24
+#define PCIECAP_AER_HDR_LOG3 0x28
+#define PCIECAP_AER_RERR_CMD 0x2c
+#define PCIECAP_AER_RERR_CMD_FE 0x00000001
+#define PCIECAP_AER_RERR_CMD_NFE 0x00000002
+#define PCIECAP_AER_RERR_CMD_CE 0x00000004
+#define PCIECAP_AER_RERR_STA 0x30
+#define PCIECAP_AER_RERR_STA_CORR 0x00000001
+#define PCIECAP_AER_RERR_STA_MCORR 0x00000002
+#define PCIECAP_AER_RERR_STA_FNF 0x00000004
+#define PCIECAP_AER_RERR_STA_MFNF 0x00000008
+#define PCIECAP_AER_RERR_F_UFATAL 0x00000010
+#define PCIECAP_AER_RERR_NFE 0x00000020
+#define PCIECAP_AER_RERR_FE 0x00000040
+#define PCIECAP_AER_RERR_MSINO_MASK 0xf8000000
+#define PCIECAP_AER_RERR_MSINO_LSH 27
+#define PCIECAP_AER_SRCID 0x34
+#define PCIECAP_AER_SRCID_CORR_MASK 0x0000ffff
+#define PCIECAP_AER_SRCID_CORR_LSH 0
+#define PCIECAP_AER_SRCID_FNF_MASK 0xffff0000
+#define PCIECAP_AER_SRCID_FNF_LSH 16
+#define PCIECAP_AER_TLP_PFX_LOG0 0x38
+#define PCIECAP_AER_TLP_PFX_LOG1 0x3c
+#define PCIECAP_AER_TLP_PFX_LOG2 0x40
+#define PCIECAP_AER_TLP_PFX_LOG3 0x44
+
+#endif /* __PCI_CFG_H */
diff --git a/include/pci.h b/include/pci.h
new file mode 100644
index 0000000..984bd38
--- /dev/null
+++ b/include/pci.h
@@ -0,0 +1,504 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PCI_H
+#define __PCI_H
+
+#include <opal.h>
+#include <device.h>
+#include <ccan/list/list.h>
+
+/* PCI Slot Info: Wired Lane Values
+ *
+ * Values 0 to 6 match slot map 1005. In case of *any* change here
+ * make sure to keep the lxvpd.c parsing code in sync *and* the
+ * corresponding label strings in pci.c
+ */
+#define PCI_SLOT_WIRED_LANES_UNKNOWN 0x00
+#define PCI_SLOT_WIRED_LANES_PCIE_X1 0x01
+#define PCI_SLOT_WIRED_LANES_PCIE_X2 0x02
+#define PCI_SLOT_WIRED_LANES_PCIE_X4 0x03
+#define PCI_SLOT_WIRED_LANES_PCIE_X8 0x04
+#define PCI_SLOT_WIRED_LANES_PCIE_X16 0x05
+#define PCI_SLOT_WIRED_LANES_PCIE_X32 0x06
+#define PCI_SLOT_WIRED_LANES_PCIX_32 0x07
+#define PCI_SLOT_WIRED_LANES_PCIX_64 0x08
+
+/* PCI Slot Info: Bus Clock Values */
+#define PCI_SLOT_BUS_CLK_RESERVED 0x00
+#define PCI_SLOT_BUS_CLK_GEN_1 0x01
+#define PCI_SLOT_BUS_CLK_GEN_2 0x02
+#define PCI_SLOT_BUS_CLK_GEN_3 0x03
+
+/* PCI Slot Info: Connector Type Values */
+#define PCI_SLOT_CONNECTOR_PCIE_EMBED 0x00
+#define PCI_SLOT_CONNECTOR_PCIE_X1 0x01
+#define PCI_SLOT_CONNECTOR_PCIE_X2 0x02
+#define PCI_SLOT_CONNECTOR_PCIE_X4 0x03
+#define PCI_SLOT_CONNECTOR_PCIE_X8 0x04
+#define PCI_SLOT_CONNECTOR_PCIE_X16 0x05
+#define PCI_SLOT_CONNECTOR_PCIE_NS 0x0E /* Non-Standard */
+
+/* PCI Slot Info: Card Description Values */
+#define PCI_SLOT_DESC_NON_STANDARD 0x00 /* Embed/Non-Standard Connector */
+#define PCI_SLOT_DESC_PCIE_FH_FL 0x00 /* Full Height, Full Length */
+#define PCI_SLOT_DESC_PCIE_FH_HL 0x01 /* Full Height, Half Length */
+#define PCI_SLOT_DESC_PCIE_HH_FL 0x02 /* Half Height, Full Length */
+#define PCI_SLOT_DESC_PCIE_HH_HL 0x03 /* Half Height, Half Length */
+
+/* PCI Slot Info: Mechanicals Values */
+#define PCI_SLOT_MECH_NONE 0x00
+#define PCI_SLOT_MECH_RIGHT 0x01
+#define PCI_SLOT_MECH_LEFT 0x02
+#define PCI_SLOT_MECH_RIGHT_LEFT 0x03
+
+/* PCI Slot Info: Power LED Control Values */
+#define PCI_SLOT_PWR_LED_CTL_NONE 0x00 /* No Control */
+#define PCI_SLOT_PWR_LED_CTL_FSP 0x01 /* FSP Controlled */
+#define PCI_SLOT_PWR_LED_CTL_KERNEL 0x02 /* Kernel Controlled */
+
+/* PCI Slot Info: ATTN LED Control Values */
+#define PCI_SLOT_ATTN_LED_CTL_NONE 0x00 /* No Control */
+#define PCI_SLOT_ATTN_LED_CTL_FSP 0x01 /* FSP Controlled */
+#define PCI_SLOT_ATTN_LED_CTL_KERNEL 0x02 /* Kernel Controlled */
+
+/* PCI Slot Entry Information */
+struct pci_slot_info {
+ uint8_t switch_id;
+ uint8_t vswitch_id;
+ uint8_t dev_id;
+ char label[9];
+ bool pluggable;
+ bool power_ctl;
+ uint8_t wired_lanes;
+ uint8_t bus_clock;
+ uint8_t connector_type;
+ uint8_t card_desc;
+ uint8_t card_mech;
+ uint8_t pwr_led_ctl;
+ uint8_t attn_led_ctl;
+ uint8_t slot_index;
+};
+
+/*
+ * While this might not be necessary in the long run, the existing
+ * Linux kernels expect us to provide a device-tree that contains
+ * a representation of all PCI devices below the host bridge. Thus
+ * we need to perform a bus scan. We don't need to assign MMIO/IO
+ * resources, but we do need to assign bus numbers in a way that
+ * is compatible with the HW constraints for PE filtering, that is,
+ * naturally aligned powers of two for ranges below a bridge.
+ *
+ * Thus the structure pci_device is used for the tracking of the
+ * detected devices and the later generation of the device-tree.
+ *
+ * We do not keep a separate structure for a bus, however a device
+ * can have children in which case a device is a bridge.
+ *
+ * Because this is likely to change, we avoid putting too much
+ * information in that structure and avoid relying on it for anything
+ * other than the construction of the flat device-tree.
+ */
+struct pci_device {
+ uint16_t bdfn;
+ bool is_bridge;
+ bool is_multifunction;
+ uint8_t dev_type; /* PCIE */
+ uint32_t scan_map;
+
+ uint64_t cap_list;
+ uint32_t cap[64];
+ uint32_t mps; /* Max payload size capability */
+
+ struct pci_slot_info *slot_info;
+ struct pci_device *parent;
+ struct list_head children;
+ struct list_node link;
+};
+
+static inline void pci_set_cap(struct pci_device *pd,
+ int id, int pos, bool ext)
+{
+ if (!ext) {
+ pd->cap_list |= (0x1ul << id);
+ pd->cap[id] = pos;
+ } else {
+ pd->cap_list |= (0x1ul << (id + 32));
+ pd->cap[id + 32] = pos;
+ }
+}
+
+static inline bool pci_has_cap(struct pci_device *pd,
+ int id, bool ext)
+{
+ if (!ext)
+ return !!(pd->cap_list & (0x1ul << id));
+ else
+ return !!(pd->cap_list & (0x1ul << (id + 32)));
+}
+
+static inline int pci_cap(struct pci_device *pd,
+ int id, bool ext)
+{
+ if (!ext)
+ return pd->cap[id];
+ else
+ return pd->cap[id + 32];
+}
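+
+/*
+ * Illustrative usage sketch for the helpers above, assuming a device
+ * whose PCI Express capability was found at config offset 0x40 during
+ * probing (the offset is hypothetical):
+ *
+ *	pci_set_cap(pd, PCI_CFG_CAP_ID_EXP, 0x40, false);
+ *	...
+ *	if (pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false))
+ *		ecap = pci_cap(pd, PCI_CFG_CAP_ID_EXP, false);
+ *
+ * Extended capabilities use the same helpers with ext == true.
+ */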
+
+/*
+ * When generating the device-tree, we need to keep track of
+ * the LSI mapping & swizzle it. This state structure is
+ * passed by the PHB to pci_add_nodes() and will be used
+ * internally.
+ *
+ * We assume that the interrupt parent (PIC) #address-cells
+ * is 0 and #interrupt-cells has a max value of 2.
+ */
+struct pci_lsi_state {
+#define MAX_INT_SIZE 2
+ uint32_t int_size; /* #cells */
+ uint32_t int_val[4][MAX_INT_SIZE]; /* INTA...INTD */
+ uint32_t int_parent[4];
+};
+
+/*
+ * NOTE: All PCI functions return negative OPAL error codes
+ *
+ * In addition, some functions may return a positive timeout
+ * value or some other state information, see the description
+ * of individual functions. If nothing is specified, it's
+ * just an error code or 0 (success).
+ *
+ * Functions that operate asynchronously will return a positive
+ * delay value and will require the ->poll() op to be called after
+ * that delay. ->poll() will then return success, a negative error
+ * code, or another delay.
+ *
+ * Note: If an asynchronous function returns 0, it has completed
+ * successfully and does not require a call to ->poll(). Similarly
+ * if ->poll() is called while no operation is in progress, it will
+ * simply return 0 (success)
+ *
+ * Note that all functions except ->lock() itself assume that the
+ * caller is holding the PHB lock.
+ *
+ * TODO: Add more interfaces to control things like link width
+ * reduction for power savings etc...
+ */
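+
+/*
+ * Illustrative polling sketch for the asynchronous pattern described
+ * above, assuming a hypothetical wait_for() helper (the unit of the
+ * returned delay is defined by the individual operation):
+ *
+ *	int64_t rc = phb->ops->slot_power_on(phb);
+ *	while (rc > 0) {
+ *		wait_for(rc);
+ *		rc = phb->ops->poll(phb);
+ *	}
+ *	if (rc < 0)
+ *		... handle the OPAL error code ...
+ */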
+
+struct phb;
+
+struct phb_ops {
+ /*
+ * Locking. This is called around OPAL accesses
+ */
+ void (*lock)(struct phb *phb);
+ void (*unlock)(struct phb *phb);
+
+ /*
+ * Config space ops
+ */
+ int64_t (*cfg_read8)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t *data);
+ int64_t (*cfg_read16)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t *data);
+ int64_t (*cfg_read32)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t *data);
+ int64_t (*cfg_write8)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t data);
+ int64_t (*cfg_write16)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t data);
+ int64_t (*cfg_write32)(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t data);
+
+ /*
+ * Bus number selection. See pci_scan() for a description
+ */
+ uint8_t (*choose_bus)(struct phb *phb, struct pci_device *bridge,
+ uint8_t candidate, uint8_t *max_bus,
+ bool *use_max);
+
+ /*
+ * Device init method is called after a device has been detected
+ * and before probing further. It can alter things like scan_map
+ * for bridge ports etc...
+ */
+ void (*device_init)(struct phb *phb, struct pci_device *device);
+
+ /*
+ * EEH methods
+ *
+ * The various arguments are identical to the corresponding
+ * OPAL functions
+ */
+ int64_t (*eeh_freeze_status)(struct phb *phb, uint64_t pe_number,
+ uint8_t *freeze_state,
+ uint16_t *pci_error_type,
+ uint16_t *severity,
+ uint64_t *phb_status);
+ int64_t (*eeh_freeze_clear)(struct phb *phb, uint64_t pe_number,
+ uint64_t eeh_action_token);
+
+ int64_t (*get_diag_data)(struct phb *phb, void *diag_buffer,
+ uint64_t diag_buffer_len);
+ int64_t (*get_diag_data2)(struct phb *phb, void *diag_buffer,
+ uint64_t diag_buffer_len);
+ int64_t (*next_error)(struct phb *phb, uint64_t *first_frozen_pe,
+ uint16_t *pci_error_type, uint16_t *severity);
+
+ /*
+ * Other IODA methods
+ *
+ * The various arguments are identical to the corresponding
+ * OPAL functions
+ */
+ int64_t (*pci_reinit)(struct phb *phb, uint64_t scope, uint64_t data);
+ int64_t (*phb_mmio_enable)(struct phb *phb, uint16_t window_type,
+ uint16_t window_num, uint16_t enable);
+
+ int64_t (*set_phb_mem_window)(struct phb *phb, uint16_t window_type,
+ uint16_t window_num, uint64_t addr,
+ uint64_t pci_addr, uint64_t size);
+
+ int64_t (*map_pe_mmio_window)(struct phb *phb, uint16_t pe_number,
+ uint16_t window_type, uint16_t window_num,
+ uint16_t segment_num);
+
+ int64_t (*set_pe)(struct phb *phb, uint64_t pe_number,
+ uint64_t bus_dev_func, uint8_t bus_compare,
+ uint8_t dev_compare, uint8_t func_compare,
+ uint8_t pe_action);
+
+ int64_t (*set_peltv)(struct phb *phb, uint32_t parent_pe,
+ uint32_t child_pe, uint8_t state);
+
+ int64_t (*map_pe_dma_window)(struct phb *phb, uint16_t pe_number,
+ uint16_t window_id, uint16_t tce_levels,
+ uint64_t tce_table_addr,
+ uint64_t tce_table_size,
+ uint64_t tce_page_size);
+
+ int64_t (*map_pe_dma_window_real)(struct phb *phb, uint16_t pe_number,
+ uint16_t dma_window_number,
+ uint64_t pci_start_addr,
+ uint64_t pci_mem_size);
+
+ int64_t (*set_mve)(struct phb *phb, uint32_t mve_number,
+ uint32_t pe_number);
+
+ int64_t (*set_mve_enable)(struct phb *phb, uint32_t mve_number,
+ uint32_t state);
+
+ int64_t (*set_xive_pe)(struct phb *phb, uint32_t pe_number,
+ uint32_t xive_num);
+
+ int64_t (*get_xive_source)(struct phb *phb, uint32_t xive_num,
+ int32_t *interrupt_source_number);
+
+ int64_t (*get_msi_32)(struct phb *phb, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint32_t *msi_address, uint32_t *message_data);
+
+ int64_t (*get_msi_64)(struct phb *phb, uint32_t mve_number,
+ uint32_t xive_num, uint8_t msi_range,
+ uint64_t *msi_address, uint32_t *message_data);
+
+ int64_t (*ioda_reset)(struct phb *phb, bool purge);
+
+ /*
+ * P5IOC2 only
+ */
+ int64_t (*set_phb_tce_memory)(struct phb *phb, uint64_t tce_mem_addr,
+ uint64_t tce_mem_size);
+
+ /*
+ * IODA2 PCI interfaces
+ */
+ int64_t (*pci_msi_eoi)(struct phb *phb, uint32_t hwirq);
+
+ /*
+ * Slot control
+ */
+
+ /* presence_detect - Check for a present device
+ *
+ * Immediate return of:
+ *
+ * OPAL_SHPC_DEV_NOT_PRESENT = 0,
+ * OPAL_SHPC_DEV_PRESENT = 1
+ *
+ * or a negative OPAL error code
+ */
+ int64_t (*presence_detect)(struct phb *phb);
+
+ /* link_state - Check link state
+ *
+ * Immediate return of:
+ *
+ * OPAL_SHPC_LINK_DOWN = 0,
+ * OPAL_SHPC_LINK_UP_x1 = 1,
+ * OPAL_SHPC_LINK_UP_x2 = 2,
+ * OPAL_SHPC_LINK_UP_x4 = 4,
+ * OPAL_SHPC_LINK_UP_x8 = 8,
+ * OPAL_SHPC_LINK_UP_x16 = 16,
+ * OPAL_SHPC_LINK_UP_x32 = 32
+ *
+ * or a negative OPAL error code
+ */
+ int64_t (*link_state)(struct phb *phb);
+
+ /* power_state - Check slot power state
+ *
+ * Immediate return of:
+ *
+ * OPAL_SLOT_POWER_OFF = 0,
+ * OPAL_SLOT_POWER_ON = 1,
+ *
+ * or a negative OPAL error code
+ */
+ int64_t (*power_state)(struct phb *phb);
+
+ /* slot_power_off - Start slot power off sequence
+ *
+ * Asynchronous function, returns a positive delay
+ * or a negative error code
+ */
+ int64_t (*slot_power_off)(struct phb *phb);
+
+ /* slot_power_on - Start slot power on sequence
+ *
+ * Asynchronous function, returns a positive delay
+ * or a negative error code.
+ */
+ int64_t (*slot_power_on)(struct phb *phb);
+
+ /* PHB power off and on after complete init */
+ int64_t (*complete_reset)(struct phb *phb, uint8_t assert);
+
+ /* hot_reset - Hot Reset sequence */
+ int64_t (*hot_reset)(struct phb *phb);
+
+ /* Fundamental reset */
+ int64_t (*fundamental_reset)(struct phb *phb);
+
+ /* poll - Poll and advance asynchronous operations
+ *
+ * Returns a positive delay, 0 for success or a
+ * negative OPAL error code
+ */
+ int64_t (*poll)(struct phb *phb);
+
+ /* Put phb in capi mode or pcie mode */
+ int64_t (*set_capi_mode)(struct phb *phb, uint64_t mode, uint64_t pe_number);
+};
+
+enum phb_type {
+ phb_type_pci,
+ phb_type_pcix_v1,
+ phb_type_pcix_v2,
+ phb_type_pcie_v1,
+ phb_type_pcie_v2,
+ phb_type_pcie_v3,
+};
+
+struct phb {
+ struct dt_node *dt_node;
+ int opal_id;
+ uint32_t scan_map;
+ enum phb_type phb_type;
+ struct list_head devices;
+ const struct phb_ops *ops;
+ struct pci_lsi_state lstate;
+ uint32_t mps;
+
+ /* PCI-X only slot info, for PCI-E this is in the RC bridge */
+ struct pci_slot_info *slot_info;
+
+	/* Base location code used to generate the children's location codes */
+ const char *base_loc_code;
+
+ /* Additional data the platform might need to attach */
+ void *platform_data;
+};
+
+/* Config space ops wrappers */
+static inline int64_t pci_cfg_read8(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t *data)
+{
+ return phb->ops->cfg_read8(phb, bdfn, offset, data);
+}
+
+static inline int64_t pci_cfg_read16(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t *data)
+{
+ return phb->ops->cfg_read16(phb, bdfn, offset, data);
+}
+
+static inline int64_t pci_cfg_read32(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t *data)
+{
+ return phb->ops->cfg_read32(phb, bdfn, offset, data);
+}
+
+static inline int64_t pci_cfg_write8(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint8_t data)
+{
+ return phb->ops->cfg_write8(phb, bdfn, offset, data);
+}
+
+static inline int64_t pci_cfg_write16(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint16_t data)
+{
+ return phb->ops->cfg_write16(phb, bdfn, offset, data);
+}
+
+static inline int64_t pci_cfg_write32(struct phb *phb, uint32_t bdfn,
+ uint32_t offset, uint32_t data)
+{
+ return phb->ops->cfg_write32(phb, bdfn, offset, data);
+}
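+
+/*
+ * Illustrative only: reading the 32-bit vendor/device ID dword (config
+ * offset 0) of the device at bdfn 0 (bus 0, device 0, function 0),
+ * assuming the caller already holds the PHB lock as required:
+ *
+ *	uint32_t id;
+ *
+ *	if (pci_cfg_read32(phb, 0, 0, &id) == OPAL_SUCCESS)
+ *		... the vendor ID is in the low 16 bits of id ...
+ */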
+
+/* Utilities */
+extern int64_t pci_find_cap(struct phb *phb, uint16_t bdfn, uint8_t cap);
+extern int64_t pci_find_ecap(struct phb *phb, uint16_t bdfn, uint16_t cap,
+ uint8_t *version);
+extern int32_t pci_configure_mps(struct phb *phb, struct pci_device *pd);
+
+extern struct pci_device *pci_walk_dev(struct phb *phb,
+ int (*cb)(struct phb *,
+ struct pci_device *,
+ void *),
+ void *userdata);
+extern struct pci_device *pci_find_dev(struct phb *phb, uint16_t bdfn);
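+
+/*
+ * Illustrative pci_walk_dev() sketch with a hypothetical callback;
+ * the walk presumably stops at, and returns, the device for which the
+ * callback returns non-zero:
+ *
+ *	static int match_bridge(struct phb *phb, struct pci_device *pd,
+ *				void *data)
+ *	{
+ *		return pd->is_bridge;
+ *	}
+ *
+ *	struct pci_device *br = pci_walk_dev(phb, match_bridge, NULL);
+ */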
+
+/* Manage PHBs */
+extern int64_t pci_register_phb(struct phb *phb);
+extern int64_t pci_unregister_phb(struct phb *phb);
+extern struct phb *pci_get_phb(uint64_t phb_id);
+static inline void pci_put_phb(struct phb *phb __unused) { }
+
+/* Device tree */
+extern void pci_std_swizzle_irq_map(struct dt_node *dt_node,
+ struct pci_device *pd,
+ struct pci_lsi_state *lstate,
+ uint8_t swizzle);
+
+/* Initialize all PCI slots */
+extern void pci_init_slots(void);
+extern void pci_reset(void);
+
+#endif /* __PCI_H */
diff --git a/include/phb3-regs.h b/include/phb3-regs.h
new file mode 100644
index 0000000..0aaab9a
--- /dev/null
+++ b/include/phb3-regs.h
@@ -0,0 +1,436 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PHB3_REGS_H
+#define __PHB3_REGS_H
+
+
+/*
+ * PHB registers
+ */
+
+/* PHB Fundamental register set A */
+#define PHB_LSI_SOURCE_ID 0x100
+#define PHB_LSI_SRC_ID_MASK PPC_BITMASK(5,12)
+#define PHB_LSI_SRC_ID_LSH PPC_BITLSHIFT(12)
+#define PHB_DMA_CHAN_STATUS 0x110
+#define PHB_DMA_CHAN_ANY_ERR PPC_BIT(27)
+#define PHB_DMA_CHAN_ANY_ERR1 PPC_BIT(28)
+#define PHB_DMA_CHAN_ANY_FREEZE PPC_BIT(29)
+#define PHB_CPU_LOADSTORE_STATUS 0x120
+#define PHB_CPU_LS_ANY_ERR PPC_BIT(27)
+#define PHB_CPU_LS_ANY_ERR1 PPC_BIT(28)
+#define PHB_CPU_LS_ANY_FREEZE PPC_BIT(29)
+#define PHB_DMA_MSI_NODE_ID 0x128
+#define PHB_DMAMSI_NID_FIXED PPC_BIT(0)
+#define PHB_DMAMSI_NID_MASK PPC_BITMASK(24,31)
+#define PHB_DMAMSI_NID_LSH PPC_BITLSHIFT(31)
+#define PHB_CONFIG_DATA 0x130
+#define PHB_LOCK0 0x138
+#define PHB_CONFIG_ADDRESS 0x140
+#define PHB_CA_ENABLE PPC_BIT(0)
+#define PHB_CA_BUS_MASK PPC_BITMASK(4,11)
+#define PHB_CA_BUS_LSH PPC_BITLSHIFT(11)
+#define PHB_CA_DEV_MASK PPC_BITMASK(12,16)
+#define PHB_CA_DEV_LSH PPC_BITLSHIFT(16)
+#define PHB_CA_FUNC_MASK PPC_BITMASK(17,19)
+#define PHB_CA_FUNC_LSH PPC_BITLSHIFT(19)
+#define PHB_CA_REG_MASK PPC_BITMASK(20,31)
+#define PHB_CA_REG_LSH PPC_BITLSHIFT(31)
+#define PHB_CA_PE_MASK PPC_BITMASK(40,47)
+#define PHB_CA_PE_LSH PPC_BITLSHIFT(47)
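+
+/*
+ * Illustrative sketch: composing a PHB_CONFIG_ADDRESS value from a
+ * standard bdfn and register offset (both assumed locals), using the
+ * field masks and big-endian shift amounts above:
+ *
+ *	uint64_t ca = PHB_CA_ENABLE;
+ *
+ *	ca |= ((uint64_t)(bdfn >> 8) << PHB_CA_BUS_LSH) & PHB_CA_BUS_MASK;
+ *	ca |= ((uint64_t)((bdfn >> 3) & 0x1f) << PHB_CA_DEV_LSH) & PHB_CA_DEV_MASK;
+ *	ca |= ((uint64_t)(bdfn & 0x7) << PHB_CA_FUNC_LSH) & PHB_CA_FUNC_MASK;
+ *	ca |= ((uint64_t)offset << PHB_CA_REG_LSH) & PHB_CA_REG_MASK;
+ */
+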
+#define PHB_LOCK1 0x148
+#define PHB_IVT_BAR 0x150
+#define PHB_IVT_BAR_ENABLE PPC_BIT(0)
+#define PHB_IVT_BASE_ADDRESS_MASK PPC_BITMASK(14,48)
+#define PHB_IVT_BASE_ADDRESS_LSH PPC_BITLSHIFT(48)
+#define PHB_IVT_LENGTH_MASK PPC_BITMASK(52,63)
+#define PHB_IVT_LENGTH_ADDRESS_LSH PPC_BITLSHIFT(63)
+#define PHB_RBA_BAR 0x158
+#define PHB_RBA_BAR_ENABLE PPC_BIT(0)
+#define PHB_RBA_BASE_ADDRESS_MASK PPC_BITMASK(14,55)
+#define PHB_RBA_BASE_ADDRESS_LSH PPC_BITLSHIFT(55)
+#define PHB_PHB3_CONFIG 0x160
+#define PHB_PHB3C_64B_TCE_EN PPC_BIT(2)
+#define PHB_PHB3C_32BIT_MSI_EN PPC_BIT(8)
+#define PHB_PHB3C_64BIT_MSI_EN PPC_BIT(14)
+#define PHB_PHB3C_M32_EN PPC_BIT(16)
+#define PHB_RTT_BAR 0x168
+#define PHB_RTT_BAR_ENABLE PPC_BIT(0)
+#define PHB_RTT_BASE_ADDRESS_MASK PPC_BITMASK(14,46)
+#define PHB_RTT_BASE_ADDRESS_LSH PPC_BITLSHIFT(46)
+#define PHB_PELTV_BAR 0x188
+#define PHB_PELTV_BAR_ENABLE PPC_BIT(0)
+#define PHB_PELTV_BASE_ADDRESS_MASK PPC_BITMASK(14,50)
+#define PHB_PELTV_BASE_ADDRESS_LSH PPC_BITLSHIFT(50)
+#define PHB_M32_BASE_ADDR 0x190
+#define PHB_M32_BASE_MASK 0x198
+#define PHB_M32_START_ADDR 0x1a0
+#define PHB_PEST_BAR 0x1a8
+#define PHB_PEST_BAR_ENABLE PPC_BIT(0)
+#define PHB_PEST_BASE_ADDRESS_MASK PPC_BITMASK(14,51)
+#define PHB_PEST_BASE_ADDRESS_LSH PPC_BITLSHIFT(51)
+#define PHB_M64_UPPER_BITS 0x1f0
+#define PHB_INTREP_TIMER 0x1f8
+#define PHB_DMARD_SYNC 0x200
+#define PHB_RTC_INVALIDATE 0x208
+#define PHB_RTC_INVALIDATE_ALL PPC_BIT(0)
+#define PHB_RTC_INVALIDATE_RID_MASK PPC_BITMASK(16,31)
+#define PHB_RTC_INVALIDATE_RID_LSH PPC_BITLSHIFT(31)
+#define PHB_TCE_KILL 0x210
+#define PHB_TCE_KILL_ALL PPC_BIT(0)
+#define PHB_TCE_SPEC_CTL 0x218
+#define PHB_IODA_ADDR 0x220
+#define PHB_IODA_AD_AUTOINC PPC_BIT(0)
+#define PHB_IODA_AD_TSEL_MASK PPC_BITMASK(11,15)
+#define PHB_IODA_AD_TSEL_LSH PPC_BITLSHIFT(15)
+#define PHB_IODA_AD_TADR_MASK PPC_BITMASK(55,63)
+#define PHB_IODA_AD_TADR_LSH PPC_BITLSHIFT(63)
+#define PHB_IODA_DATA0 0x228
+#define PHB_FFI_REQUEST 0x238
+#define PHB_FFI_LOCK_CLEAR PPC_BIT(3)
+#define PHB_FFI_REQUEST_ISN_MASK PPC_BITMASK(49,59)
+#define PHB_FFI_REQUEST_ISN_LSH PPC_BITLSHIFT(59)
+#define PHB_FFI_LOCK 0x240
+#define PHB_XIVE_UPDATE 0x248 /* Broken in DD1 */
+#define PHB_PHB3_GEN_CAP 0x250
+#define PHB_PHB3_TCE_CAP 0x258
+#define PHB_PHB3_IRQ_CAP 0x260
+#define PHB_PHB3_EEH_CAP 0x268
+#define PHB_IVC_INVALIDATE 0x2a0
+#define PHB_IVC_INVALIDATE_ALL PPC_BIT(0)
+#define PHB_IVC_INVALIDATE_SID_MASK PPC_BITMASK(16,31)
+#define PHB_IVC_INVALIDATE_SID_LSH PPC_BITLSHIFT(31)
+#define PHB_IVC_UPDATE 0x2a8
+#define PHB_IVC_UPDATE_ENABLE_P PPC_BIT(0)
+#define PHB_IVC_UPDATE_ENABLE_Q PPC_BIT(1)
+#define PHB_IVC_UPDATE_ENABLE_SERVER PPC_BIT(2)
+#define PHB_IVC_UPDATE_ENABLE_PRI PPC_BIT(3)
+#define PHB_IVC_UPDATE_ENABLE_GEN PPC_BIT(4)
+#define PHB_IVC_UPDATE_ENABLE_CON PPC_BIT(5)
+#define PHB_IVC_UPDATE_GEN_MATCH_MASK PPC_BITMASK(6, 7)
+#define PHB_IVC_UPDATE_GEN_MATCH_LSH PPC_BITLSHIFT(7)
+#define PHB_IVC_UPDATE_SERVER_MASK PPC_BITMASK(8, 23)
+#define PHB_IVC_UPDATE_SERVER_LSH PPC_BITLSHIFT(23)
+#define PHB_IVC_UPDATE_PRI_MASK PPC_BITMASK(24, 31)
+#define PHB_IVC_UPDATE_PRI_LSH PPC_BITLSHIFT(31)
+#define PHB_IVC_UPDATE_GEN_MASK PPC_BITMASK(32,33)
+#define PHB_IVC_UPDATE_GEN_LSH PPC_BITLSHIFT(33)
+#define PHB_IVC_UPDATE_P_MASK PPC_BITMASK(34,34)
+#define PHB_IVC_UPDATE_P_LSH PPC_BITLSHIFT(34)
+#define PHB_IVC_UPDATE_Q_MASK PPC_BITMASK(35,35)
+#define PHB_IVC_UPDATE_Q_LSH PPC_BITLSHIFT(35)
+#define PHB_IVC_UPDATE_SID_MASK PPC_BITMASK(48,63)
+#define PHB_IVC_UPDATE_SID_LSH PPC_BITLSHIFT(63)
+#define PHB_PAPR_ERR_INJ_CONTROL 0x2b0
+#define PHB_PAPR_ERR_INJ_ADDR 0x2b8
+#define PHB_PAPR_ERR_INJ_MASK 0x2c0
+#define PHB_ETU_ERR_SUMMARY 0x2c8
+
+/* UTL registers */
+#define UTL_SYS_BUS_CONTROL 0x400
+#define UTL_STATUS 0x408
+#define UTL_SYS_BUS_AGENT_STATUS 0x410
+#define UTL_SYS_BUS_AGENT_ERR_SEVERITY 0x418
+#define UTL_SYS_BUS_AGENT_IRQ_EN 0x420
+#define UTL_SYS_BUS_BURST_SZ_CONF 0x440
+#define UTL_REVISION_ID 0x448
+#define UTL_BCLK_DOMAIN_DBG1 0x460
+#define UTL_BCLK_DOMAIN_DBG2 0x468
+#define UTL_BCLK_DOMAIN_DBG3 0x470
+#define UTL_BCLK_DOMAIN_DBG4 0x478
+#define UTL_BCLK_DOMAIN_DBG5 0x480
+#define UTL_BCLK_DOMAIN_DBG6 0x488
+#define UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
+#define UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
+#define UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
+#define UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
+#define UTL_OUT_NP_BUF_ALLOC 0x500
+#define UTL_IN_NP_BUF_ALLOC 0x510
+#define UTL_PCIE_TAGS_ALLOC 0x520
+#define UTL_GBIF_READ_TAGS_ALLOC 0x530
+#define UTL_PCIE_PORT_CONTROL 0x540
+#define UTL_PCIE_PORT_STATUS 0x548
+#define UTL_PCIE_PORT_ERROR_SEV 0x550
+#define UTL_PCIE_PORT_IRQ_EN 0x558
+#define UTL_RC_STATUS 0x560
+#define UTL_RC_ERR_SEVERITY 0x568
+#define UTL_RC_IRQ_EN 0x570
+#define UTL_EP_STATUS 0x578
+#define UTL_EP_ERR_SEVERITY 0x580
+#define UTL_EP_ERR_IRQ_EN 0x588
+#define UTL_PCI_PM_CTRL1 0x590
+#define UTL_PCI_PM_CTRL2 0x598
+#define UTL_GP_CTL1 0x5a0
+#define UTL_GP_CTL2 0x5a8
+#define UTL_PCLK_DOMAIN_DBG1 0x5b0
+#define UTL_PCLK_DOMAIN_DBG2 0x5b8
+#define UTL_PCLK_DOMAIN_DBG3 0x5c0
+#define UTL_PCLK_DOMAIN_DBG4 0x5c8
+
+/* PCI-E Stack registers */
+#define PHB_PCIE_SYSTEM_CONFIG 0x600
+#define PHB_PCIE_BUS_NUMBER 0x608
+#define PHB_PCIE_SYSTEM_TEST 0x618
+#define PHB_PCIE_LINK_MANAGEMENT 0x630
+#define PHB_PCIE_LM_LINK_ACTIVE PPC_BIT(8)
+#define PHB_PCIE_DLP_TRAIN_CTL 0x640
+#define PHB_PCIE_DLP_TCTX_DISABLE PPC_BIT(1)
+#define PHB_PCIE_DLP_TCRX_DISABLED PPC_BIT(16)
+#define PHB_PCIE_DLP_INBAND_PRESENCE PPC_BIT(19)
+#define PHB_PCIE_DLP_TC_DL_LINKUP PPC_BIT(21)
+#define PHB_PCIE_DLP_TC_DL_PGRESET PPC_BIT(22)
+#define PHB_PCIE_DLP_TC_DL_LINKACT PPC_BIT(23)
+#define PHB_PCIE_SLOP_LOOPBACK_STATUS 0x648
+#define PHB_PCIE_SYS_LINK_INIT 0x668
+#define PHB_PCIE_UTL_CONFIG 0x670
+#define PHB_PCIE_DLP_CONTROL 0x678
+#define PHB_PCIE_UTL_ERRLOG1 0x680
+#define PHB_PCIE_UTL_ERRLOG2 0x688
+#define PHB_PCIE_UTL_ERRLOG3 0x690
+#define PHB_PCIE_UTL_ERRLOG4 0x698
+#define PHB_PCIE_DLP_ERRLOG1 0x6a0
+#define PHB_PCIE_DLP_ERRLOG2 0x6a8
+#define PHB_PCIE_DLP_ERR_STATUS 0x6b0
+#define PHB_PCIE_DLP_ERR_COUNTERS 0x6b8
+#define PHB_PCIE_UTL_ERR_INJECT 0x6c0
+#define PHB_PCIE_TLDLP_ERR_INJECT 0x6c8
+#define PHB_PCIE_LANE_EQ_CNTL0 0x6d0
+#define PHB_PCIE_LANE_EQ_CNTL1 0x6d8
+#define PHB_PCIE_LANE_EQ_CNTL2 0x6e0
+#define PHB_PCIE_LANE_EQ_CNTL3 0x6e8
+#define PHB_PCIE_STRAPPING 0x700
+
+/* Fundamental register set B */
+#define PHB_VERSION 0x800
+#define PHB_RESET 0x808
+#define PHB_CONTROL 0x810
+#define PHB_AIB_RX_CRED_INIT_TIMER 0x818
+#define PHB_AIB_RX_CMD_CRED 0x820
+#define PHB_AIB_RX_DATA_CRED 0x828
+#define PHB_AIB_TX_CMD_CRED 0x830
+#define PHB_AIB_TX_DATA_CRED 0x838
+#define PHB_AIB_TX_CHAN_MAPPING 0x840
+#define PHB_AIB_TAG_ENABLE 0x858
+#define PHB_AIB_FENCE_CTRL 0x860
+#define PHB_TCE_TAG_ENABLE 0x868
+#define PHB_TCE_WATERMARK 0x870
+#define PHB_TIMEOUT_CTRL1 0x878
+#define PHB_TIMEOUT_CTRL2 0x880
+#define PHB_QUIESCE_DMA_G 0x888
+#define PHB_AIB_TAG_STATUS 0x900
+#define PHB_TCE_TAG_STATUS 0x908
+
+/* FIR & Error registers */
+#define PHB_LEM_FIR_ACCUM 0xc00
+#define PHB_LEM_FIR_AND_MASK 0xc08
+#define PHB_LEM_FIR_OR_MASK 0xc10
+#define PHB_LEM_ERROR_MASK 0xc18
+#define PHB_LEM_ERROR_AND_MASK 0xc20
+#define PHB_LEM_ERROR_OR_MASK 0xc28
+#define PHB_LEM_ACTION0 0xc30
+#define PHB_LEM_ACTION1 0xc38
+#define PHB_LEM_WOF 0xc40
+#define PHB_ERR_STATUS 0xc80
+#define PHB_ERR1_STATUS 0xc88
+#define PHB_ERR_INJECT 0xc90
+#define PHB_ERR_LEM_ENABLE 0xc98
+#define PHB_ERR_IRQ_ENABLE 0xca0
+#define PHB_ERR_FREEZE_ENABLE 0xca8
+#define PHB_ERR_AIB_FENCE_ENABLE 0xcb0
+#define PHB_ERR_LOG_0 0xcc0
+#define PHB_ERR_LOG_1 0xcc8
+#define PHB_ERR_STATUS_MASK 0xcd0
+#define PHB_ERR1_STATUS_MASK 0xcd8
+
+#define PHB_OUT_ERR_STATUS 0xd00
+#define PHB_OUT_ERR1_STATUS 0xd08
+#define PHB_OUT_ERR_INJECT 0xd10
+#define PHB_OUT_ERR_LEM_ENABLE 0xd18
+#define PHB_OUT_ERR_IRQ_ENABLE 0xd20
+#define PHB_OUT_ERR_FREEZE_ENABLE 0xd28
+#define PHB_OUT_ERR_AIB_FENCE_ENABLE 0xd30
+#define PHB_OUT_ERR_LOG_0 0xd40
+#define PHB_OUT_ERR_LOG_1 0xd48
+#define PHB_OUT_ERR_STATUS_MASK 0xd50
+#define PHB_OUT_ERR1_STATUS_MASK 0xd58
+
+#define PHB_INA_ERR_STATUS 0xd80
+#define PHB_INA_ERR1_STATUS 0xd88
+#define PHB_INA_ERR_INJECT 0xd90
+#define PHB_INA_ERR_LEM_ENABLE 0xd98
+#define PHB_INA_ERR_IRQ_ENABLE 0xda0
+#define PHB_INA_ERR_FREEZE_ENABLE 0xda8
+#define PHB_INA_ERR_AIB_FENCE_ENABLE 0xdb0
+#define PHB_INA_ERR_LOG_0 0xdc0
+#define PHB_INA_ERR_LOG_1 0xdc8
+#define PHB_INA_ERR_STATUS_MASK 0xdd0
+#define PHB_INA_ERR1_STATUS_MASK 0xdd8
+
+#define PHB_INB_ERR_STATUS 0xe00
+#define PHB_INB_ERR1_STATUS 0xe08
+#define PHB_INB_ERR_INJECT 0xe10
+#define PHB_INB_ERR_LEM_ENABLE 0xe18
+#define PHB_INB_ERR_IRQ_ENABLE 0xe20
+#define PHB_INB_ERR_FREEZE_ENABLE 0xe28
+#define PHB_INB_ERR_AIB_FENCE_ENABLE 0xe30
+#define PHB_INB_ERR_LOG_0 0xe40
+#define PHB_INB_ERR_LOG_1 0xe48
+#define PHB_INB_ERR_STATUS_MASK 0xe50
+#define PHB_INB_ERR1_STATUS_MASK 0xe58
+
+/* Performance monitor & Debug registers */
+#define PHB_TRACE_CONTROL 0xf80
+#define PHB_PERFMON_CONFIG 0xf88
+#define PHB_PERFMON_CTR0 0xf90
+#define PHB_PERFMON_CTR1 0xf98
+#define PHB_PERFMON_CTR2 0xfa0
+#define PHB_PERFMON_CTR3 0xfa8
+#define PHB_HOTPLUG_OVERRIDE 0xfb0
+#define PHB_HPOVR_FORCE_RESAMPLE PPC_BIT(9)
+#define PHB_HPOVR_PRESENCE_A PPC_BIT(10)
+#define PHB_HPOVR_PRESENCE_B PPC_BIT(11)
+#define PHB_HPOVR_LINK_ACTIVE PPC_BIT(12)
+#define PHB_HPOVR_LINK_BIFURCATED PPC_BIT(13)
+#define PHB_HPOVR_LINK_LANE_SWAPPED PPC_BIT(14)
+
+/*
+ * IODA2 on-chip tables
+ */
+
+#define IODA2_TBL_LIST 1
+#define IODA2_TBL_LXIVT 2
+#define IODA2_TBL_IVC_CAM 3
+#define IODA2_TBL_RBA 4
+#define IODA2_TBL_RCAM 5
+#define IODA2_TBL_MRT 6
+#define IODA2_TBL_PESTA 7
+#define IODA2_TBL_PESTB 8
+#define IODA2_TBL_TVT 9
+#define IODA2_TBL_TCAM 10
+#define IODA2_TBL_TDR 11
+#define IODA2_TBL_M64BT 16
+#define IODA2_TBL_M32DT 17
+#define IODA2_TBL_PEEV 20
+
+/* LXIVT */
+#define IODA2_LXIVT_SERVER_MASK PPC_BITMASK(8,23)
+#define IODA2_LXIVT_SERVER_LSH PPC_BITLSHIFT(23)
+#define IODA2_LXIVT_PRIORITY_MASK PPC_BITMASK(24,31)
+#define IODA2_LXIVT_PRIORITY_LSH PPC_BITLSHIFT(31)
+#define IODA2_LXIVT_NODE_ID_MASK PPC_BITMASK(56,63)
+#define IODA2_LXIVT_NODE_ID_LSH PPC_BITLSHIFT(63)
+
+/* IVT */
+#define IODA2_IVT_SERVER_MASK PPC_BITMASK(0,23)
+#define IODA2_IVT_SERVER_LSH PPC_BITLSHIFT(23)
+#define IODA2_IVT_PRIORITY_MASK PPC_BITMASK(24,31)
+#define IODA2_IVT_PRIORITY_LSH PPC_BITLSHIFT(31)
+#define IODA2_IVT_P_MASK PPC_BITMASK(39,39)
+#define IODA2_IVT_P_LSH PPC_BITLSHIFT(39)
+#define IODA2_IVT_Q_MASK PPC_BITMASK(47,47)
+#define IODA2_IVT_Q_LSH PPC_BITLSHIFT(47)
+#define IODA2_IVT_PE_MASK PPC_BITMASK(48,63)
+#define IODA2_IVT_PE_LSH PPC_BITLSHIFT(63)
+
+/* TVT */
+#define IODA2_TVT_TABLE_ADDR_MASK PPC_BITMASK(0,47)
+#define IODA2_TVT_TABLE_ADDR_LSH PPC_BITLSHIFT(47)
+#define IODA2_TVT_NUM_LEVELS_MASK PPC_BITMASK(48,50)
+#define IODA2_TVT_NUM_LEVELS_LSH PPC_BITLSHIFT(50)
+#define IODA2_TVE_1_LEVEL 0
+#define IODA2_TVE_2_LEVELS 1
+#define IODA2_TVE_3_LEVELS 2
+#define IODA2_TVE_4_LEVELS 3
+#define IODA2_TVE_5_LEVELS 4
+#define IODA2_TVT_TCE_TABLE_SIZE_MASK PPC_BITMASK(51,55)
+#define IODA2_TVT_TCE_TABLE_SIZE_LSH PPC_BITLSHIFT(55)
+#define IODA2_TVT_IO_PSIZE_MASK PPC_BITMASK(59,63)
+#define IODA2_TVT_IO_PSIZE_LSH PPC_BITLSHIFT(63)
+
+/* PESTA */
+#define IODA2_PESTA_MMIO_FROZEN PPC_BIT(0)
+
+/* PESTB */
+#define IODA2_PESTB_DMA_STOPPED PPC_BIT(0)
+
+/* M32DT */
+#define IODA2_M32DT_PE_MASK PPC_BITMASK(8,15)
+#define IODA2_M32DT_PE_LSH PPC_BITLSHIFT(15)
+
+/* M64BT */
+#define IODA2_M64BT_ENABLE PPC_BIT(0)
+#define IODA2_M64BT_SINGLE_PE PPC_BIT(1)
+#define IODA2_M64BT_BASE_MASK PPC_BITMASK(2,31)
+#define IODA2_M64BT_BASE_LSH PPC_BITLSHIFT(31)
+#define IODA2_M64BT_MASK_MASK PPC_BITMASK(34,63)
+#define IODA2_M64BT_MASK_LSH PPC_BITLSHIFT(63)
+#define IODA2_M64BT_SINGLE_BASE_MASK PPC_BITMASK(2,26)
+#define IODA2_M64BT_SINGLE_BASE_LSH PPC_BITLSHIFT(26)
+#define IODA2_M64BT_PE_HI_MASK PPC_BITMASK(27,31)
+#define IODA2_M64BT_PE_HI_LSH PPC_BITLSHIFT(31)
+#define IODA2_M64BT_SINGLE_MASK_MASK PPC_BITMASK(34,58)
+#define IODA2_M64BT_SINGLE_MASK_LSH PPC_BITLSHIFT(58)
+#define IODA2_M64BT_PE_LOW_MASK PPC_BITMASK(59,63)
+#define IODA2_M64BT_PE_LOW_LSH PPC_BITLSHIFT(63)
+
+/*
+ * IODA2 in-memory tables
+ */
+
+/* PEST
+ *
+ * 2 x 8-byte entries per PE, PEST0 and PEST1
+ */
+
+#define IODA2_PEST0_MMIO_CAUSE PPC_BIT(2)
+#define IODA2_PEST0_CFG_READ PPC_BIT(3)
+#define IODA2_PEST0_CFG_WRITE PPC_BIT(4)
+#define IODA2_PEST0_TTYPE_MASK PPC_BITMASK(5,7)
+#define IODA2_PEST0_TTYPE_LSH PPC_BITLSHIFT(7)
+#define PEST_TTYPE_DMA_WRITE 0
+#define PEST_TTYPE_MSI 1
+#define PEST_TTYPE_DMA_READ 2
+#define PEST_TTYPE_DMA_READ_RESP 3
+#define PEST_TTYPE_MMIO_LOAD 4
+#define PEST_TTYPE_MMIO_STORE 5
+#define PEST_TTYPE_OTHER 7
+#define IODA2_PEST0_CA_RETURN PPC_BIT(8)
+#define IODA2_PEST0_UTL_RTOS_TIMEOUT PPC_BIT(8) /* Same bit as CA return */
+#define IODA2_PEST0_UR_RETURN PPC_BIT(9)
+#define IODA2_PEST0_UTL_NONFATAL PPC_BIT(10)
+#define IODA2_PEST0_UTL_FATAL PPC_BIT(11)
+#define IODA2_PEST0_PARITY_UE PPC_BIT(13)
+#define IODA2_PEST0_UTL_CORRECTABLE PPC_BIT(14)
+#define IODA2_PEST0_UTL_INTERRUPT PPC_BIT(15)
+#define IODA2_PEST0_MMIO_XLATE PPC_BIT(16)
+#define IODA2_PEST0_IODA2_ERROR PPC_BIT(16) /* Same bit as MMIO xlate */
+#define IODA2_PEST0_TCE_PAGE_FAULT PPC_BIT(18)
+#define IODA2_PEST0_TCE_ACCESS_FAULT PPC_BIT(19)
+#define IODA2_PEST0_DMA_RESP_TIMEOUT PPC_BIT(20)
+#define IODA2_PEST0_AIB_SIZE_INVALID PPC_BIT(21)
+#define IODA2_PEST0_LEM_BIT_MASK PPC_BITMASK(26,31)
+#define IODA2_PEST0_LEM_BIT_LSH PPC_BITLSHIFT(31)
+#define IODA2_PEST0_RID_MASK PPC_BITMASK(32,47)
+#define IODA2_PEST0_RID_LSH PPC_BITLSHIFT(47)
+#define IODA2_PEST0_MSI_DATA_MASK PPC_BITMASK(48,63)
+#define IODA2_PEST0_MSI_DATA_LSH PPC_BITLSHIFT(63)
+
+#define IODA2_PEST1_FAIL_ADDR_MASK PPC_BITMASK(3,63)
+#define IODA2_PEST1_FAIL_ADDR_LSH PPC_BITLSHIFT(63)
+
+
+#endif /* __PHB3_REGS_H */
diff --git a/include/phb3.h b/include/phb3.h
new file mode 100644
index 0000000..9789336
--- /dev/null
+++ b/include/phb3.h
@@ -0,0 +1,355 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+*/
+#ifndef __PHB3_H
+#define __PHB3_H
+
+#include <interrupts.h>
+
+/*
+ * Memory map
+ *
+ * In addition to the 4K MMIO registers window, the PBCQ will
+ * forward down one or two large MMIO regions for use by the
+ * PHB.
+ *
+ * We try to use the largest MMIO window for the M64 space and
+ * the smallest for the M32 space, but we require at least 2G
+ * of M32, otherwise we carve it out of M64.
+ */
+
+#define M32_PCI_START 0x080000000 /* Offset of the actual M32 window in PCI */
+#define M32_PCI_SIZE 0x80000000ul /* Size for M32 */
+
+/*
+ * Interrupt map.
+ *
+ * Each PHB supports 2K interrupt sources, which are shared by
+ * LSIs and MSIs. With the default configuration, MSIs use the range
+ * [0, 0x7f7] and LSIs use [0x7f8, 0x7ff]. The interrupt source is
+ * combined with the IRSN to form the final hardware IRQ.
+ */
+#define PHB3_MSI_IRQ_MIN 0x000
+#define PHB3_MSI_IRQ_COUNT 0x7F8
+#define PHB3_MSI_IRQ_MAX (PHB3_MSI_IRQ_MIN+PHB3_MSI_IRQ_COUNT-1)
+#define PHB3_LSI_IRQ_MIN (PHB3_MSI_IRQ_COUNT)
+#define PHB3_LSI_IRQ_COUNT 8
+#define PHB3_LSI_IRQ_MAX (PHB3_LSI_IRQ_MIN+PHB3_LSI_IRQ_COUNT-1)
+
+#define PHB3_MSI_IRQ_BASE(chip, phb) (P8_CHIP_IRQ_PHB_BASE(chip, phb) | \
+ PHB3_MSI_IRQ_MIN)
+#define PHB3_LSI_IRQ_BASE(chip, phb) (P8_CHIP_IRQ_PHB_BASE(chip, phb) | \
+ PHB3_LSI_IRQ_MIN)
+#define PHB3_IRQ_NUM(irq) ((irq) & 0x7FF)
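+
+/*
+ * Worked example (illustrative): the hardware IRQ for MSI source 5 on
+ * PHB 1 of chip 0 is PHB3_MSI_IRQ_BASE(0, 1) + 5, and PHB3_IRQ_NUM()
+ * recovers the per-PHB source number (5) from that hardware IRQ.
+ */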
+
+/*
+ * LSI interrupts
+ *
+ * The LSI interrupt block supports 8 interrupts. 4 of them are the
+ * standard PCIe INTA..INTD. The rest are for additional functions
+ * of the PHB.
+ */
+#define PHB3_LSI_PCIE_INTA 0
+#define PHB3_LSI_PCIE_INTB 1
+#define PHB3_LSI_PCIE_INTC 2
+#define PHB3_LSI_PCIE_INTD 3
+#define PHB3_LSI_PCIE_INF 6
+#define PHB3_LSI_PCIE_ER 7
+
+/*
+ * In-memory tables
+ *
+ * PHB3 requires a bunch of tables to be in memory instead of
+ * arrays inside the chip (unlike previous versions of the
+ * design).
+ *
+ * Some of them (IVT, etc...) will be provided by the OS via an
+ * OPAL call. However, not all of them are, and we also need to make
+ * sure some, like the PELT-V, exist before we do our internal slot
+ * probing, or bad things would happen on error (the whole PHB would
+ * go into the Fatal error state).
+ *
+ * So we maintain a set of tables internally for those mandatory
+ * ones within our core memory. They are fairly small. They can
+ * still be replaced by OS provided ones via OPAL APIs (and reset
+ * to the internal ones) so the OS can provide node local allocation
+ * for better performance.
+ *
+ * All those tables have to be naturally aligned
+ */
+
+/* RTT Table : 128KB - Maps RID to PE#
+ *
+ * Entries are 2 bytes indexed by PCIe RID
+ */
+#define RTT_TABLE_ENTRIES 0x10000
+#define RTT_TABLE_SIZE (RTT_TABLE_ENTRIES * sizeof(struct rtt_entry))
+struct rtt_entry {
+ uint16_t pe_num;
+};
+
+/* IVT Table : MSI Interrupt vectors & state.
+ *
+ * We know that Simics uses 16-byte IVEs, for a total of 32KB.
+ * However the real HW possibly uses 128-byte IVEs, for a total of 256KB.
+ */
+#define IVT_TABLE_ENTRIES 0x800
+
+/* Default to 128-byte IVEs, uncomment this to force it back to 16 bytes */
+//#define IVT_TABLE_IVE_16B
+
+#ifdef IVT_TABLE_IVE_16B
+#define IVT_TABLE_SIZE 0x8000
+#define IVT_TABLE_STRIDE 2 /* double-words */
+#else
+#define IVT_TABLE_SIZE 0x40000
+#define IVT_TABLE_STRIDE 16 /* double-words */
+#endif
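+
+/*
+ * Illustrative: with the default 128-byte IVEs (IVT_TABLE_STRIDE of
+ * 16 double-words), the IVE for interrupt source "isn" starts at byte
+ * offset isn * IVT_TABLE_STRIDE * 8 within the table.
+ */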
+
+/* PELT-V Table : 8KB - Maps PE# to PE# dependencies
+ *
+ * 256 entries of 256 bits (32 bytes) each
+ */
+#define PELTV_TABLE_SIZE 0x2000
+
+/* PEST Table : 4KB - PE state table
+ *
+ * 256 entries of 16 bytes each containing state bits for each PE
+ *
+ * AFAIK: This acts as a backup for an on-chip cache and shall be
+ * accessed via the indirect IODA table access registers only
+ */
+#define PEST_TABLE_SIZE 0x1000
+
+/* RBA Table : 256 bytes - Reject Bit Array
+ *
+ * 2048 interrupts, 1 bit each, indicating the reject state of each interrupt
+ */
+#define RBA_TABLE_SIZE 0x100
+
+/*
+ * Maximum supported PE# in PHB3. We should probably probe it from the
+ * EEH capability register later.
+ */
+#define PHB3_MAX_PE_NUM 256
+
+/*
+ * State structure for a PHB
+ */
+
+/*
+ * (Comment copied from p7ioc.h, please update both when relevant)
+ *
+ * The PHB State structure is essentially used during PHB reset
+ * or recovery operations to indicate that the PHB cannot currently
+ * be used for normal operations.
+ *
+ * Some states involve waiting for the timebase to reach a certain
+ * value. In which case the field "delay_tgt_tb" is set and the
+ * state machine will be run from the "state_poll" callback.
+ *
+ * At IPL time, we call this repeatedly during the various sequences
+ * however under OS control, this will require a change in API.
+ *
+ * Fortunately, the OPAL API for slot power & reset are not currently
+ * used by Linux, so changing them isn't going to be an issue. The idea
+ * here is that some of these APIs will return a positive integer when
+ * needing such a delay to proceed. The OS will then be required to
+ * call a new function opal_poll_phb() after that delay. That function
+ * will potentially return a new delay, or OPAL_SUCCESS when the original
+ * operation has completed successfully. If the operation has completed
+ * with an error, then opal_poll_phb() will return that error.
+ *
+ * Note: Should we also consider optionally returning some indication
+ * of what operation is in progress, for OS debug/diag purposes?
+ *
+ * Any attempt at starting a new "asynchronous" operation while one is
+ * already in progress will result in an error.
+ *
+ * Internally, this is represented by the state being PHB3_STATE_FUNCTIONAL
+ * when no operation is in progress, which it reaches at the end of the
+ * boot time initializations. Any attempt at performing a slot operation
+ * on a PHB in that state will change the state to the corresponding
+ * operation state machine. Any attempt while not in that state will
+ * return an error.
+ *
+ * Some operations allow for a certain amount of retries, this is
+ * provided for by the "retries" structure member for use by the state
+ * machine as it sees fit.
+ */
+enum phb3_state {
+ /* First init state */
+ PHB3_STATE_UNINITIALIZED,
+
+ /* During PHB HW inits */
+ PHB3_STATE_INITIALIZING,
+
+ /* Set if the PHB is for some reason unusable */
+ PHB3_STATE_BROKEN,
+
+ /* PHB fenced */
+ PHB3_STATE_FENCED,
+
+ /* Normal PHB functional state */
+ PHB3_STATE_FUNCTIONAL,
+
+ /* Hot reset */
+ PHB3_STATE_HRESET_DELAY,
+ PHB3_STATE_HRESET_DELAY2,
+
+ /* Fundamental reset */
+ PHB3_STATE_FRESET_ASSERT_DELAY,
+ PHB3_STATE_FRESET_DEASSERT_DELAY,
+
+ /* Complete reset */
+ PHB3_STATE_CRESET_WAIT_CQ,
+ PHB3_STATE_CRESET_REINIT,
+ PHB3_STATE_CRESET_FRESET,
+
+ /* Link state machine */
+ PHB3_STATE_WAIT_LINK_ELECTRICAL,
+ PHB3_STATE_WAIT_LINK,
+};
+
+/*
+ * PHB3 error descriptor. Errors from all components (PBCQ, PHB)
+ * will be cached to PHB3 instance. However, PBCQ errors would
+ * have higher priority than those from PHB
+ */
+#define PHB3_ERR_SRC_NONE 0
+#define PHB3_ERR_SRC_PBCQ 1
+#define PHB3_ERR_SRC_PHB 2
+
+#define PHB3_ERR_CLASS_NONE 0
+#define PHB3_ERR_CLASS_DEAD 1
+#define PHB3_ERR_CLASS_FENCED 2
+#define PHB3_ERR_CLASS_ER 3
+#define PHB3_ERR_CLASS_INF 4
+#define PHB3_ERR_CLASS_LAST 5
+
+struct phb3_err {
+ uint32_t err_src;
+ uint32_t err_class;
+ uint32_t err_bit;
+};
+
+/* Link timeouts, increments of 100ms */
+#define PHB3_LINK_WAIT_RETRIES 20
+#define PHB3_LINK_ELECTRICAL_RETRIES 10
+
+/* PHB3 flags */
+#define PHB3_AIB_FENCED 0x00000001
+#define PHB3_CFG_USE_ASB 0x00000002
+#define PHB3_CFG_BLOCKED 0x00000004
+
+struct phb3 {
+ unsigned int index; /* 0..2 index inside P8 */
+ unsigned int flags;
+ unsigned int chip_id; /* Chip ID (== GCID on P8) */
+ unsigned int rev; /* 00MMmmmm */
+#define PHB3_REV_MURANO_DD10 0xa30001
+#define PHB3_REV_VENICE_DD10 0xa30002
+#define PHB3_REV_MURANO_DD20 0xa30003
+#define PHB3_REV_MURANO_DD21 0xa30004
+#define PHB3_REV_VENICE_DD20 0xa30005
+ void *regs;
+ uint64_t pe_xscom; /* XSCOM bases */
+ uint64_t pci_xscom;
+ uint64_t spci_xscom;
+ struct lock lock;
+ uint64_t mm0_base; /* Full MM window to PHB */
+ uint64_t mm0_size; /* '' '' '' */
+ uint64_t mm1_base; /* Full MM window to PHB */
+ uint64_t mm1_size; /* '' '' '' */
+ uint32_t base_msi;
+ uint32_t base_lsi;
+
+ /* SkiBoot owned in-memory tables */
+ uint64_t tbl_rtt;
+ uint64_t tbl_peltv;
+ uint64_t tbl_pest;
+ uint64_t tbl_ivt;
+ uint64_t tbl_rba;
+
+ bool skip_perst; /* Skip first perst */
+ bool has_link;
+ bool use_ab_detect;
+ enum phb3_state state;
+ uint64_t delay_tgt_tb;
+ uint64_t retries;
+ int64_t ecap; /* cached PCI-E cap offset */
+ int64_t aercap; /* cached AER ecap offset */
+ const __be64 *lane_eq;
+ uint64_t capp_ucode_base;
+ bool capp_ucode_loaded;
+ unsigned int max_link_speed;
+
+ uint16_t rte_cache[RTT_TABLE_SIZE/2];
+ uint8_t peltv_cache[PELTV_TABLE_SIZE];
+ uint64_t lxive_cache[8];
+ uint64_t ive_cache[IVT_TABLE_ENTRIES];
+ uint64_t tve_cache[512];
+ uint64_t m32d_cache[256];
+ uint64_t m64b_cache[16];
+ uint64_t nfir_cache; /* Used by complete reset */
+ bool err_pending;
+ struct phb3_err err;
+
+ struct phb phb;
+};
+
+static inline struct phb3 *phb_to_phb3(struct phb *phb)
+{
+ return container_of(phb, struct phb3, phb);
+}
+
+static inline uint64_t phb3_read_reg_asb(struct phb3 *p, uint64_t offset)
+{
+ uint64_t val;
+
+ xscom_write(p->chip_id, p->spci_xscom, offset);
+ xscom_read(p->chip_id, p->spci_xscom + 0x2, &val);
+
+ return val;
+}
+
+static inline void phb3_write_reg_asb(struct phb3 *p,
+ uint64_t offset, uint64_t val)
+{
+ xscom_write(p->chip_id, p->spci_xscom, offset);
+ xscom_write(p->chip_id, p->spci_xscom + 0x2, val);
+}
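+
+/*
+ * Illustrative usage: reading the PHB version register through the
+ * ASB (XSCOM indirect) path above, e.g. when MMIO access is not usable
+ * because the AIB is fenced:
+ *
+ *	uint64_t ver = phb3_read_reg_asb(p, PHB_VERSION);
+ */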
+
+static inline bool phb3_err_pending(struct phb3 *p)
+{
+ return p->err_pending;
+}
+
+static inline void phb3_set_err_pending(struct phb3 *p, bool pending)
+{
+ if (!pending) {
+ p->err.err_src = PHB3_ERR_SRC_NONE;
+ p->err.err_class = PHB3_ERR_CLASS_NONE;
+ p->err.err_bit = -1;
+ }
+
+ p->err_pending = pending;
+}
+
+#endif /* __PHB3_H */
diff --git a/include/platform.h b/include/platform.h
new file mode 100644
index 0000000..689a80b
--- /dev/null
+++ b/include/platform.h
@@ -0,0 +1,122 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PLATFORM_H
+#define __PLATFORM_H
+
+/* Some fwd declarations for types used further down */
+struct phb;
+struct pci_device;
+
+/*
+ * Each platform can provide a set of hooks
+ * that can affect the generic code
+ */
+struct platform {
+ const char *name;
+
+ /*
+ * Probe platform, return true on a match, called before
+ * any allocation has been performed outside of the heap
+ * so the platform can perform additional memory reservations
+ * here if needed.
+ *
+ * Only the boot CPU is running at this point and the cpu_thread
+	 * structures for secondaries have not been initialized yet. The
+ * timebases are not synchronized.
+ *
+ * Services available:
+ *
+ * - Memory allocations / reservations
+ * - XSCOM
+ * - FSI
+ * - Host Services
+ */
+ bool (*probe)(void);
+
+ /*
+ * This is called right after the secondary processors are brought
+ * up and the timebases in sync to perform any additional platform
+ * specific initializations. On FSP based machines, this is where
+ * the FSP driver is brought up.
+ */
+ void (*init)(void);
+
+ /*
+ * These are used to power down and reboot the machine
+ */
+ int64_t (*cec_power_down)(uint64_t request);
+ int64_t (*cec_reboot)(void);
+
+ /*
+ * This is called once per PHB before probing. It allows the
+ * platform to setup some PHB private data that can be used
+ * later on by calls such as pci_get_slot_info() below. The
+ * "index" argument is the PHB index within the IO HUB (or
+ * P8 chip).
+ *
+ * This is called before the PHB HW has been initialized.
+ */
+ void (*pci_setup_phb)(struct phb *phb, unsigned int index);
+
+ /*
+ * Called during PCI scan for each device. For bridges, this is
+ * called before its children are probed. This is called for
+	 * every device, and for the PHB itself with a NULL pd, though
+	 * typically the implementation will only populate the slot
+	 * info structure for bridge ports.
+ */
+ void (*pci_get_slot_info)(struct phb *phb,
+ struct pci_device *pd);
+
+ /*
+ * Called after PCI probe is complete and before inventory is
+ * displayed in console. This can either run platform fixups or
+ * can be used to send the inventory to a service processor.
+ */
+ void (*pci_probe_complete)(void);
+
+ /*
+	 * Handler for the external interrupt when the platform routes it
+	 * to skiboot rather than to the OS (see EXTERNAL_IRQ_POLICY_SKIBOOT
+	 * in psi.h)
+ */
+ void (*external_irq)(unsigned int chip_id);
+
+ /*
+ * nvram ops.
+ *
+	 * Note: To keep the FSP driver simple, we only ever read the
+	 * whole nvram once at boot, passing a dst buffer that is 4K
+	 * aligned. The read is asynchronous; the backend must call
+	 * nvram_read_complete() when done (it's allowed to do it
+	 * recursively from nvram_start_read though).
+ */
+ int (*nvram_info)(uint32_t *total_size);
+ int (*nvram_start_read)(void *dst, uint32_t src,
+ uint32_t len);
+ int (*nvram_write)(uint32_t dst, void *src, uint32_t len);
+};
+
+extern struct platform __platforms_start;
+extern struct platform __platforms_end;
+
+extern struct platform platform;
+
+#define DECLARE_PLATFORM(name)\
+static const struct platform __used __section(".platforms") name ##_platform
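+
+/*
+ * Illustrative sketch (not part of this patch): a platform file fills in
+ * its hooks and registers them with DECLARE_PLATFORM so probe_platform()
+ * can find the entry in the .platforms section. Names are hypothetical:
+ *
+ *	static bool myplat_probe(void) { return true; }
+ *
+ *	DECLARE_PLATFORM(myplat) = {
+ *		.name	= "MyPlatform",
+ *		.probe	= myplat_probe,
+ *	};
+ */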
+
+extern void probe_platform(void);
+
+#endif /* __PLATFORM_H */
diff --git a/include/processor.h b/include/processor.h
new file mode 100644
index 0000000..c15e17e
--- /dev/null
+++ b/include/processor.h
@@ -0,0 +1,318 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PROCESSOR_H
+#define __PROCESSOR_H
+
+#include <bitutils.h>
+
+/* P7 MSR bits */
+#define MSR_SF PPC_BIT(0) /* 64-bit mode */
+#define MSR_HV PPC_BIT(3) /* Hypervisor mode */
+#define MSR_VEC PPC_BIT(38) /* VMX enable */
+#define MSR_VSX PPC_BIT(40) /* VSX enable */
+#define MSR_EE PPC_BIT(48) /* External Int. Enable */
+#define MSR_PR PPC_BIT(49) /* Problem state */
+#define MSR_FP PPC_BIT(50) /* Floating Point Enable */
+#define MSR_ME PPC_BIT(51) /* Machine Check Enable */
+#define MSR_FE0 PPC_BIT(52) /* FP Exception 0 */
+#define MSR_SE PPC_BIT(53) /* Step enable */
+#define MSR_BE PPC_BIT(54) /* Branch trace enable */
+#define MSR_FE1 PPC_BIT(55) /* FP Exception 1 */
+#define MSR_IR PPC_BIT(58) /* Instructions reloc */
+#define MSR_DR PPC_BIT(59) /* Data reloc */
+#define MSR_PMM PPC_BIT(61) /* Perf Monitor */
+#define MSR_RI PPC_BIT(62) /* Recoverable Interrupt */
+#define MSR_LE PPC_BIT(63) /* Little Endian */
+
+/* PIR */
+#define SPR_PIR_P8_THREAD_MASK 0x0007 /* Mask of thread bits */
+#define SPR_PIR_P8_MASK 0x1fff /* Mask of implemented bits */
+#define SPR_PIR_P7_THREAD_MASK 0x0003 /* Mask of thread bits */
+#define SPR_PIR_P7_MASK 0x03ff /* Mask of implemented bits */
+
+/* SPR register definitions */
+#define SPR_DSISR 0x012 /* RW: Data storage interrupt status reg */
+#define SPR_DAR 0x013 /* RW: Data address reg */
+#define SPR_SDR1 0x019
+#define SPR_SRR0 0x01a /* RW: Exception save/restore reg 0 */
+#define SPR_SRR1 0x01b /* RW: Exception save/restore reg 1 */
+#define SPR_CFAR 0x01c /* RW: Come From Address Register */
+#define SPR_TBRL 0x10c /* RO: Timebase low */
+#define SPR_TBRU 0x10d /* RO: Timebase high */
+#define SPR_SPRC 0x114 /* RW: Access to uArch SPRs (ex SCOMC) */
+#define SPR_SPRD 0x115 /* RW: Access to uArch SPRs (ex SCOMD) */
+#define SPR_SCOMC 0x114 /* RW: SCOM Control - old name of SPRC */
+#define SPR_SCOMD 0x115 /* RW: SCOM Data - old name of SPRD */
+#define SPR_TBWL 0x11c /* RW: Timebase low */
+#define SPR_TBWU 0x11d /* RW: Timebase high */
+#define SPR_TBU40 0x11e /* RW: Timebase Upper 40 bit */
+#define SPR_PVR 0x11f /* RO: Processor version register */
+#define SPR_HSPRG0 0x130 /* RW: Hypervisor scratch 0 */
+#define SPR_HSPRG1 0x131 /* RW: Hypervisor scratch 1 */
+#define SPR_HSRR0 0x13a /* RW: HV Exception save/restore reg 0 */
+#define SPR_HSRR1 0x13b /* RW: HV Exception save/restore reg 1 */
+#define SPR_TFMR 0x13d
+#define SPR_LPCR 0x13e
+#define SPR_HMER 0x150 /* Hypervisor Maintenance Exception */
+#define SPR_HMEER 0x151 /* HMER interrupt enable mask */
+#define SPR_AMOR 0x15d
+#define SPR_TSCR 0x399
+#define SPR_HID0 0x3f0
+#define SPR_HID1 0x3f1
+#define SPR_HID2 0x3f8
+#define SPR_HID4 0x3f4
+#define SPR_HID5 0x3f6
+#define SPR_PIR 0x3ff /* RO: Processor Identification */
+
+/* Bits in LPCR */
+
+/* Powersave Exit Cause Enable is different for P7 and P8 */
+#define SPR_LPCR_P7_PECE PPC_BITMASK(49,51)
+#define SPR_LPCR_P7_PECE0 PPC_BIT(49) /* Wake on external interrupts */
+#define SPR_LPCR_P7_PECE1 PPC_BIT(50) /* Wake on decrementer */
+#define SPR_LPCR_P7_PECE2 PPC_BIT(51) /* Wake on MCs, HMIs, etc... */
+
+#define SPR_LPCR_P8_PECE PPC_BITMASK(47,51)
+#define SPR_LPCR_P8_PECE0 PPC_BIT(47) /* Wake on priv doorbell */
+#define SPR_LPCR_P8_PECE1 PPC_BIT(48) /* Wake on hv doorbell */
+#define SPR_LPCR_P8_PECE2 PPC_BIT(49) /* Wake on external interrupts */
+#define SPR_LPCR_P8_PECE3 PPC_BIT(50) /* Wake on decrementer */
+#define SPR_LPCR_P8_PECE4 PPC_BIT(51) /* Wake on MCs, HMIs, etc... */
+
+
+/* Bits in TFMR - control bits */
+#define SPR_TFMR_MAX_CYC_BET_STEPS_MASK PPC_BITMASK(0,7)
+#define SPR_TFMR_MAX_CYC_BET_STEPS_LSH PPC_BITLSHIFT(7)
+#define SPR_TFMR_N_CLKS_PER_STEP_MASK PPC_BITMASK(8,9)
+#define SPR_TFMR_N_CLKS_PER_STEP_LSH PPC_BITLSHIFT(9)
+#define SPR_TFMR_MASK_HMI PPC_BIT(10)
+#define SPR_TFMR_SYNC_BIT_SEL_MASK PPC_BITMASK(11,13)
+#define SPR_TFMR_SYNC_BIT_SEL_LSH PPC_BITLSHIFT(13)
+#define SPR_TFMR_TB_ECLIPZ PPC_BIT(14)
+#define SPR_TFMR_LOAD_TOD_MOD PPC_BIT(16)
+#define SPR_TFMR_MOVE_CHIP_TOD_TO_TB PPC_BIT(18)
+#define SPR_TFMR_CLEAR_TB_ERRORS PPC_BIT(24)
+/* Bits in TFMR - thread indep. status bits */
+#define SPR_TFMR_HDEC_PARITY_ERROR PPC_BIT(26)
+#define SPR_TFMR_TBST_CORRUPT PPC_BIT(27)
+#define SPR_TFMR_TBST_ENCODED_MASK PPC_BITMASK(28,31)
+#define SPR_TFMR_TBST_ENCODED_LSH PPC_BITLSHIFT(31)
+#define SPR_TFMR_TBST_LAST_MASK PPC_BITMASK(32,35)
+#define SPR_TFMR_TBST_LAST_LSH PPC_BITLSHIFT(35)
+#define SPR_TFMR_TB_ENABLED PPC_BIT(40)
+#define SPR_TFMR_TB_VALID PPC_BIT(41)
+#define SPR_TFMR_TB_SYNC_OCCURED PPC_BIT(42)
+#define SPR_TFMR_TB_MISSING_SYNC PPC_BIT(43)
+#define SPR_TFMR_TB_MISSING_STEP PPC_BIT(44)
+#define SPR_TFMR_TB_RESIDUE_ERR PPC_BIT(45)
+#define SPR_TFMR_FW_CONTROL_ERR PPC_BIT(46)
+#define SPR_TFMR_CHIP_TOD_STATUS_MASK PPC_BITMASK(47,50)
+#define SPR_TFMR_CHIP_TOD_STATUS_LSH PPC_BITLSHIFT(50)
+#define SPR_TFMR_CHIP_TOD_INTERRUPT PPC_BIT(51)
+#define SPR_TFMR_CHIP_TOD_TIMEOUT PPC_BIT(54)
+#define SPR_TFMR_CHIP_TOD_PARITY_ERR PPC_BIT(56)
+/* Bits in TFMR - thread-specific status bits */
+#define SPR_TFMR_PURR_PARITY_ERR PPC_BIT(57)
+#define SPR_TFMR_SPURR_PARITY_ERR PPC_BIT(58)
+#define SPR_TFMR_DEC_PARITY_ERR PPC_BIT(59)
+#define SPR_TFMR_TFMR_CORRUPT PPC_BIT(60)
+#define SPR_TFMR_PURR_OVERFLOW PPC_BIT(61)
+#define SPR_TFMR_SPURR_OVERFLOW PPC_BIT(62)
+
+/* Bits in HMER/HMEER */
+#define SPR_HMER_MALFUNCTION_ALERT PPC_BIT(0)
+#define SPR_HMER_PROC_RECV_DONE PPC_BIT(2)
+#define SPR_HMER_PROC_RECV_ERROR_MASKED PPC_BIT(3)
+#define SPR_HMER_TFAC_ERROR PPC_BIT(4)
+#define SPR_HMER_TFMR_PARITY_ERROR PPC_BIT(5)
+#define SPR_HMER_XSCOM_FAIL PPC_BIT(8)
+#define SPR_HMER_XSCOM_DONE PPC_BIT(9)
+#define SPR_HMER_PROC_RECV_AGAIN PPC_BIT(11)
+#define SPR_HMER_WARN_RISE PPC_BIT(14)
+#define SPR_HMER_WARN_FALL PPC_BIT(15)
+#define SPR_HMER_SCOM_FIR_HMI PPC_BIT(16)
+#define SPR_HMER_TRIG_FIR_HMI PPC_BIT(17)
+#define SPR_HMER_HYP_RESOURCE_ERR PPC_BIT(20)
+#define SPR_HMER_XSCOM_STATUS_MASK PPC_BITMASK(21,23)
+#define SPR_HMER_XSCOM_STATUS_LSH PPC_BITLSHIFT(23)
+
+/*
+ * HMEER: initial bits for HMI interrupt enable mask.
+ * Per Dave Larson, never enable 8,9,21-23
+ */
+#define SPR_HMEER_HMI_ENABLE_MASK (SPR_HMER_MALFUNCTION_ALERT |\
+ SPR_HMER_HYP_RESOURCE_ERR |\
+ SPR_HMER_PROC_RECV_DONE |\
+ SPR_HMER_PROC_RECV_ERROR_MASKED |\
+ SPR_HMER_TFAC_ERROR |\
+ SPR_HMER_TFMR_PARITY_ERROR |\
+ SPR_HMER_PROC_RECV_AGAIN)
+
+/* Bits in HID0 */
+#define SPR_HID0_HILE PPC_BIT(19)
+#define SPR_HID0_ENABLE_ATTN PPC_BIT(31)
+
+/* PVR bits */
+#define SPR_PVR_TYPE_MASK 0xffff0000
+#define SPR_PVR_TYPE_LSH 16
+#define SPR_PVR_VERS_MAJ_MASK 0x00000f00
+#define SPR_PVR_VERS_MAJ_LSH 8
+#define SPR_PVR_VERS_MIN_MASK 0x000000ff
+#define SPR_PVR_VERS_MIN_LSH 0
+
+#define PVR_TYPE(_pvr) GETFIELD(SPR_PVR_TYPE, _pvr)
+#define PVR_VERS_MAJ(_pvr) GETFIELD(SPR_PVR_VERS_MAJ, _pvr)
+#define PVR_VERS_MIN(_pvr) GETFIELD(SPR_PVR_VERS_MIN, _pvr)
+
+/* PVR definitions */
+#define PVR_TYPE_P7 0x003f
+#define PVR_TYPE_P7P 0x004a
+#define PVR_TYPE_P8E 0x004b /* Murano */
+#define PVR_TYPE_P8 0x004d /* Venice */
+
+#ifdef __ASSEMBLY__
+
+/* Thread priority control opcodes */
+#define smt_low or 1,1,1
+#define smt_medium or 2,2,2
+#define smt_high or 3,3,3
+#define smt_medium_high or 5,5,5
+#define smt_medium_low or 6,6,6
+#define smt_extra_high or 7,7,7
+#define smt_very_low or 31,31,31
+
+#else /* __ASSEMBLY__ */
+
+#include <compiler.h>
+#include <stdint.h>
+
+/*
+ * SMT priority
+ */
+
+static inline void smt_low(void) { asm volatile("or 1,1,1"); }
+static inline void smt_medium(void) { asm volatile("or 2,2,2"); }
+static inline void smt_high(void) { asm volatile("or 3,3,3"); }
+static inline void smt_medium_high(void){ asm volatile("or 5,5,5"); }
+static inline void smt_medium_low(void) { asm volatile("or 6,6,6"); }
+static inline void smt_extra_high(void) { asm volatile("or 7,7,7"); }
+static inline void smt_very_low(void) { asm volatile("or 31,31,31"); }
+
+/*
+ * SPR access functions
+ */
+
+static inline unsigned long mfmsr(void)
+{
+ unsigned long val;
+
+ asm volatile("mfmsr %0" : "=r"(val) : : "memory");
+ return val;
+}
+
+static inline void mtmsr(unsigned long val)
+{
+ asm volatile("mtmsr %0" : : "r"(val) : "memory");
+}
+
+static inline void mtmsrd(unsigned long val, int l)
+{
+ asm volatile("mtmsrd %0,%1" : : "r"(val), "i"(l) : "memory");
+}
+
+static inline unsigned long mfspr(unsigned int spr)
+{
+ unsigned long val;
+
+ asm volatile("mfspr %0,%1" : "=r"(val) : "i"(spr) : "memory");
+ return val;
+}
+
+static inline void mtspr(unsigned int spr, unsigned long val)
+{
+ asm volatile("mtspr %0,%1" : : "i"(spr), "r"(val) : "memory");
+}
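+
+/*
+ * Example (illustrative only): reading the PVR with the accessors above
+ * and decoding it with the PVR_TYPE() helper:
+ *
+ *	uint32_t pvr = mfspr(SPR_PVR);
+ *	if (PVR_TYPE(pvr) == PVR_TYPE_P8)
+ *		...	// Venice class P8
+ */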
+
+/* Helpers for special sequences needed by some registers */
+extern void set_hid0(unsigned long hid0);
+extern void trigger_attn(void);
+
+/*
+ * Barriers
+ */
+
+static inline void eieio(void)
+{
+ asm volatile("eieio" : : : "memory");
+}
+
+static inline void sync(void)
+{
+ asm volatile("sync" : : : "memory");
+}
+
+static inline void lwsync(void)
+{
+ asm volatile("lwsync" : : : "memory");
+}
+
+static inline void isync(void)
+{
+ asm volatile("isync" : : : "memory");
+}
+
+
+/*
+ * Cache sync
+ */
+static inline void sync_icache(void)
+{
+ asm volatile("sync; icbi 0,%0; sync; isync" : : "r" (0) : "memory");
+}
+
+
+/*
+ * Byteswap load/stores
+ */
+
+static inline uint16_t ld_le16(const uint16_t *addr)
+{
+ uint16_t val;
+ asm volatile("lhbrx %0,0,%1" : "=r"(val) : "r"(addr), "m"(*addr));
+ return val;
+}
+
+static inline uint32_t ld_le32(const uint32_t *addr)
+{
+ uint32_t val;
+ asm volatile("lwbrx %0,0,%1" : "=r"(val) : "r"(addr), "m"(*addr));
+ return val;
+}
+
+static inline void st_le16(uint16_t *addr, uint16_t val)
+{
+ asm volatile("sthbrx %0,0,%1" : : "r"(val), "r"(addr), "m"(*addr));
+}
+
+static inline void st_le32(uint32_t *addr, uint32_t val)
+{
+ asm volatile("stwbrx %0,0,%1" : : "r"(val), "r"(addr), "m"(*addr));
+}
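+
+/*
+ * Illustrative use: on this big-endian CPU, ld_le32() fetches a
+ * little-endian word in host order, e.g. (hypothetical register):
+ *
+ *	uint32_t id = ld_le32(&regs->le_device_id);
+ */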
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __PROCESSOR_H */
diff --git a/include/psi.h b/include/psi.h
new file mode 100644
index 0000000..62232dc
--- /dev/null
+++ b/include/psi.h
@@ -0,0 +1,239 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * IBM System P PSI (Processor Service Interface)
+ */
+#ifndef __PSI_H
+#define __PSI_H
+
+#include <skiboot.h>
+
+/*
+ * PSI Host Bridge Registers (MMIO)
+ *
+ * The PSI interface is the bridge to the FSP; it has its own
+ * registers. The FSP registers appear at an offset within the
+ * aperture defined by the PSI_FSPBAR.
+ */
+/* Base address of the PSI MMIO space and LSB is the enable/valid bit */
+#define PSIHB_BBAR 0x00
+
+/* FSP MMIO region -- this is where the mbx regs are (offset defined below) */
+#define PSIHB_FSPBAR 0x08
+
+/* FSP MMIO region mask register -- determines size of region */
+#define PSIHB_FSPMMR 0x10
+
+/* TCE address register */
+#define PSIHB_TAR 0x18
+#define PSIHB_TAR_8K_ENTRIES 0
+#define PSIHB_TAR_16K_ENTRIES 1
+#define PSIHB_TAR_256K_ENTRIES 2 /* P8 only */
+#define PSIHB_TAR_512K_ENTRIES 4 /* P8 only */
+
+/* PSI Host Bridge Control Register
+ *
+ * note: TCE_ENABLE moved to the new PSIHB_PHBSCR on P8 but is
+ * the same bit position
+ */
+#define PSIHB_CR 0x20
+#define PSIHB_CR_FSP_CMD_ENABLE PPC_BIT(0)
+#define PSIHB_CR_FSP_MMIO_ENABLE PPC_BIT(1)
+#define PSIHB_CR_TCE_ENABLE PPC_BIT(2) /* P7 only */
+#define PSIHB_CR_FSP_IRQ_ENABLE PPC_BIT(3)
+#define PSIHB_CR_FSP_ERR_RSP_ENABLE PPC_BIT(4)
+#define PSIHB_CR_PSI_LINK_ENABLE PPC_BIT(5)
+#define PSIHB_CR_FSP_RESET PPC_BIT(6)
+#define PSIHB_CR_PSIHB_RESET PPC_BIT(7)
+#define PSIHB_CR_PSI_IRQ PPC_BIT(16) /* PSIHB interrupt */
+#define PSIHB_CR_FSP_IRQ PPC_BIT(17) /* FSP interrupt */
+#define PSIHB_CR_FSP_LINK_ACTIVE PPC_BIT(18) /* FSP link active */
+
+/* Error conditions in the GXHB */
+#define PSIHB_CR_PSI_ERROR PPC_BIT(32) /* PSI error */
+#define PSIHB_CR_PSI_LINK_INACTIVE PPC_BIT(33) /* Link inactive */
+#define PSIHB_CR_FSP_ACK_TIMEOUT PPC_BIT(34) /* FSP ack timeout */
+#define PSIHB_CR_MMIO_LOAD_TIMEOUT PPC_BIT(35) /* MMIO load timeout */
+#define PSIHB_CR_MMIO_LENGTH_ERROR PPC_BIT(36) /* MMIO length error */
+#define PSIHB_CR_MMIO_ADDRESS_ERROR PPC_BIT(37) /* MMIO address error */
+#define PSIHB_CR_MMIO_TYPE_ERROR PPC_BIT(38) /* MMIO type error */
+#define PSIHB_CR_UE PPC_BIT(39) /* UE detected */
+#define PSIHB_CR_PARITY_ERROR PPC_BIT(40) /* Parity error */
+#define PSIHB_CR_SYNC_ERR_ALERT1 PPC_BIT(41) /* Sync alert 1 */
+#define PSIHB_CR_SYNC_ERR_ALERT2 PPC_BIT(42) /* Sync alert 2 */
+#define PSIHB_CR_FSP_COMMAND_ERROR PPC_BIT(43) /* FSP cmd error */
+
+/* PSI Status / Error Mask Register */
+#define PSIHB_SEMR 0x28
+
+/* XIVR and BUID used for PSI interrupts on P7 */
+#define PSIHB_XIVR 0x30
+
+/* XIVR and BUID used for PSI interrupts on P8 */
+#define PSIHB_XIVR_FSP 0x30
+#define PSIHB_XIVR_OCC 0x60
+#define PSIHB_XIVR_FSI 0x68
+#define PSIHB_XIVR_LPC 0x70
+#define PSIHB_XIVR_LOCAL_ERR 0x78
+#define PSIHB_XIVR_HOST_ERR 0x80
+#define PSIHB_ISRN 0x88
+#define PSIHB_ISRN_COMP_MASK PPC_BITMASK(0,18)
+#define PSIHB_ISRN_COMP_LSH PPC_BITLSHIFT(18)
+#define PSIHB_ISRN_IRQ_MUX PPC_BIT(28)
+#define PSIHB_ISRN_IRQ_RESET PPC_BIT(29)
+#define PSIHB_ISRN_DOWNSTREAM_EN PPC_BIT(30)
+#define PSIHB_ISRN_UPSTREAM_EN PPC_BIT(31)
+#define PSIHB_ISRN_MASK_MASK PPC_BITMASK(32,50)
+#define PSIHB_ISRN_MASK_LSH PPC_BITLSHIFT(50)
+
+#define PSIHB_IRQ_STATUS 0x58
+#define PSIHB_IRQ_STAT_OCC PPC_BIT(27)
+#define PSIHB_IRQ_STAT_FSI PPC_BIT(28)
+#define PSIHB_IRQ_STAT_LPC PPC_BIT(29)
+#define PSIHB_IRQ_STAT_LOCAL_ERR PPC_BIT(30)
+#define PSIHB_IRQ_STAT_HOST_ERR PPC_BIT(31)
+
+/* Secure version of CR for P8 (TCE enable bit) */
+#define PSIHB_PHBSCR 0x90
+
+/*
+ * PSI Host Bridge Registers (XSCOM)
+ */
+#define PSIHB_XSCOM_P7_HBBAR 0x9
+#define PSIHB_XSCOM_P7_HBBAR_EN PPC_BIT(28)
+#define PSIHB_XSCOM_P7_HBCSR 0xd
+#define PSIHB_XSCOM_P7_HBCSR_SET 0x11
+#define PSIHB_XSCOM_P7_HBCSR_CLR 0x12
+#define PSIHB_XSCOM_P7_HBSCR_FSP_IRQ PPC_BIT(13)
+
+#define PSIHB_XSCOM_P8_BASE 0xa
+#define PSIHB_XSCOM_P8_HBBAR_EN PPC_BIT(63)
+#define PSIHB_XSCOM_P8_HBCSR 0xe
+#define PSIHB_XSCOM_P8_HBCSR_SET 0x12
+#define PSIHB_XSCOM_P8_HBCSR_CLR 0x13
+#define PSIHB_XSCOM_P8_HBSCR_FSP_IRQ PPC_BIT(17)
+
+
+/*
+ * Layout of the PSI DMA address space
+ *
+ * On P7, we instantiate a TCE table of 16K TCEs mapping 64M
+ *
+ * On P8, we use a larger mapping of 256K TCEs which provides
+ * us with a 1G window in order to fit the trace buffers
+ *
+ * Currently we have:
+ *
+ * - 4x256K serial areas (each divided in 2: in and out buffers)
+ * - 1M region for inbound buffers
+ * - 2M region for generic data fetches
+ */
+#define PSI_DMA_SER0_BASE 0x00000000
+#define PSI_DMA_SER0_SIZE 0x00040000
+#define PSI_DMA_SER1_BASE 0x00040000
+#define PSI_DMA_SER1_SIZE 0x00040000
+#define PSI_DMA_SER2_BASE 0x00080000
+#define PSI_DMA_SER2_SIZE 0x00040000
+#define PSI_DMA_SER3_BASE 0x000c0000
+#define PSI_DMA_SER3_SIZE 0x00040000
+#define PSI_DMA_INBOUND_BUF 0x00100000
+#define PSI_DMA_INBOUND_SIZE 0x00100000
+#define PSI_DMA_FETCH 0x00200000
+#define PSI_DMA_FETCH_SIZE 0x00800000
+#define PSI_DMA_NVRAM_BODY 0x00a00000
+#define PSI_DMA_NVRAM_BODY_SZ 0x00100000
+#define PSI_DMA_NVRAM_TRIPL 0x00b00000
+#define PSI_DMA_NVRAM_TRIPL_SZ 0x00001000
+#define PSI_DMA_OP_PANEL_MISC 0x00b01000
+#define PSI_DMA_OP_PANEL_SIZE 0x00001000
+#define PSI_DMA_GET_SYSPARAM 0x00b02000
+#define PSI_DMA_GET_SYSPARAM_SZ 0x00001000
+#define PSI_DMA_SET_SYSPARAM 0x00b03000
+#define PSI_DMA_SET_SYSPARAM_SZ 0x00001000
+#define PSI_DMA_ERRLOG_READ_BUF 0x00b04000
+#define PSI_DMA_ERRLOG_READ_BUF_SZ 0x00040000
+#define PSI_DMA_ELOG_PANIC_WRITE_BUF 0x00b44000
+#define PSI_DMA_ELOG_PANIC_WRITE_BUF_SZ 0x00010000
+#define PSI_DMA_ERRLOG_WRITE_BUF 0x00b54000
+#define PSI_DMA_ERRLOG_WRITE_BUF_SZ 0x00050000
+#define PSI_DMA_HBRT_LOG_WRITE_BUF 0x00ba4000
+#define PSI_DMA_HBRT_LOG_WRITE_BUF_SZ 0x00001000
+#define PSI_DMA_CODE_UPD 0x00c04000
+#define PSI_DMA_CODE_UPD_SIZE 0x01001000
+#define PSI_DMA_DUMP_DATA 0x01c05000
+#define PSI_DMA_DUMP_DATA_SIZE 0x00500000
+#define PSI_DMA_SENSOR_BUF 0x02105000
+#define PSI_DMA_SENSOR_BUF_SZ 0x00080000
+#define PSI_DMA_MDST_TABLE 0x02185000
+#define PSI_DMA_MDST_TABLE_SIZE 0x00001000
+#define PSI_DMA_HYP_DUMP 0x02186000
+#define PSI_DMA_HYP_DUMP_SIZE 0x01000000
+#define PSI_DMA_PCIE_INVENTORY 0x03186000
+#define PSI_DMA_PCIE_INVENTORY_SIZE 0x00010000
+#define PSI_DMA_LED_BUF 0x03196000
+#define PSI_DMA_LED_BUF_SZ 0x00001000
+#define PSI_DMA_LOC_COD_BUF 0x03197000
+#define PSI_DMA_LOC_COD_BUF_SZ 0x00008000
+#define PSI_DMA_MEMCONS 0x0319f000
+#define PSI_DMA_MEMCONS_SZ 0x00001000
+#define PSI_DMA_LOG_BUF 0x03200000
+#define PSI_DMA_LOG_BUF_SZ 0x00100000 /* INMEM_CON_LEN */
+
+/* P8 only mappings */
+#define PSI_DMA_TRACE_BASE 0x04000000
+
+struct psi {
+ struct list_node list;
+ uint64_t xscom_base;
+ void *regs;
+ unsigned int chip_id;
+ unsigned int interrupt;
+ bool working;
+ bool active;
+};
+
+extern void psi_set_link_polling(bool active);
+
+extern struct psi *first_psi;
+extern void psi_init(void);
+extern struct psi *psi_find_link(uint32_t chip_id);
+extern void psi_init_for_fsp(struct psi *psi);
+extern void psi_disable_link(struct psi *psi);
+extern bool psi_check_link_active(struct psi *psi);
+extern bool psi_poll_fsp_interrupt(struct psi *psi);
+
+/* Interrupts */
+extern void psi_irq_reset(void);
+extern void psi_enable_fsp_interrupt(struct psi *psi);
+extern void psi_fsp_link_in_use(struct psi *psi);
+
+/*
+ * Must be called by the platform probe() function as the policy
+ * is established before platform.init
+ *
+ * This defines whether the external interrupt should be passed to
+ * the OS or handled locally in skiboot. Return true for skiboot
+ * handling. Default if not called is Linux.
+ */
+#define EXTERNAL_IRQ_POLICY_LINUX false
+#define EXTERNAL_IRQ_POLICY_SKIBOOT true
+extern void psi_set_external_irq_policy(bool policy);
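+
+/*
+ * Typically invoked from a platform probe() hook, e.g. (illustrative):
+ *
+ *	psi_set_external_irq_policy(EXTERNAL_IRQ_POLICY_SKIBOOT);
+ */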
+
+
+
+#endif /* __PSI_H */
diff --git a/include/sfc-ctrl.h b/include/sfc-ctrl.h
new file mode 100644
index 0000000..8ff9780
--- /dev/null
+++ b/include/sfc-ctrl.h
@@ -0,0 +1,24 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef SFC_CTRL_H
+#define SFC_CTRL_H
+
+struct spi_flash_ctrl;
+
+extern int sfc_open(struct spi_flash_ctrl **ctrl);
+extern void sfc_close(struct spi_flash_ctrl *ctrl);
+
+#endif /* SFC_CTRL_H */
diff --git a/include/skiboot.h b/include/skiboot.h
new file mode 100644
index 0000000..c5b1fe0
--- /dev/null
+++ b/include/skiboot.h
@@ -0,0 +1,202 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __SKIBOOT_H
+#define __SKIBOOT_H
+
+#include <compiler.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <errno.h>
+#include <bitutils.h>
+#include <types.h>
+
+#include <ccan/container_of/container_of.h>
+#include <ccan/list/list.h>
+#include <ccan/short_types/short_types.h>
+#include <ccan/build_assert/build_assert.h>
+#include <ccan/array_size/array_size.h>
+#include <ccan/endian/endian.h>
+#include <ccan/str/str.h>
+
+#include <mem-map.h>
+#include <op-panel.h>
+#include <platform.h>
+
+/* Special ELF sections */
+#define __force_data __section(".force.data")
+
+/* Readonly section start and end. */
+extern char __rodata_start[], __rodata_end[];
+
+static inline bool is_rodata(const void *p)
+{
+ return ((const char *)p >= __rodata_start && (const char *)p < __rodata_end);
+}
+
+/* Debug descriptor. This structure is pointed to by the word at offset
+ * 0x80 in the sapphire binary
+ */
+struct debug_descriptor {
+ u8 eye_catcher[8]; /* "OPALdbug" */
+#define DEBUG_DESC_VERSION 1
+ u32 version;
+ u32 reserved[3];
+
+ /* Memory console */
+ u64 memcons_phys;
+ u32 memcons_tce;
+ u32 memcons_obuf_tce;
+ u32 memcons_ibuf_tce;
+
+ /* Traces */
+ u64 trace_mask;
+ u32 num_traces;
+#define DEBUG_DESC_MAX_TRACES 256
+ u64 trace_phys[DEBUG_DESC_MAX_TRACES];
+ u32 trace_size[DEBUG_DESC_MAX_TRACES];
+ u32 trace_tce[DEBUG_DESC_MAX_TRACES];
+};
+extern struct debug_descriptor debug_descriptor;
+
+/* General utilities */
+#define prerror(fmt...) do { fprintf(stderr, fmt); } while(0)
+
+/* Location codes -- at most 80 chars with null termination */
+#define LOC_CODE_SIZE 80
+
+enum ipl_state {
+ ipl_initial = 0x00000000,
+ ipl_opl_sent = 0x00000001,
+ ipl_got_continue = 0x00000002,
+ ipl_got_new_role = 0x00000004,
+ ipl_got_caps = 0x00000008,
+ ipl_got_fsp_functional = 0x00000010
+};
+extern enum ipl_state ipl_state;
+
+/* Processor generation */
+enum proc_gen {
+ proc_gen_unknown,
+ proc_gen_p7, /* P7 and P7+ */
+ proc_gen_p8,
+};
+extern enum proc_gen proc_gen;
+
+/* Boot stack top */
+extern void *boot_stack_top;
+
+/* For use by debug code */
+extern void backtrace(void);
+
+/* Convert a 4-bit number to a hex char */
+extern char tohex(uint8_t nibble);
+
+/* Bit position of the most significant 1-bit (LSB=0, MSB=63) */
+static inline int ilog2(unsigned long val)
+{
+ int left_zeros;
+
+ asm volatile ("cntlzd %0,%1" : "=r" (left_zeros) : "r" (val));
+
+ return 63 - left_zeros;
+}
+
+static inline bool is_pow2(unsigned long val)
+{
+ return val == (1ul << ilog2(val));
+}
+
+#define lo32(x) ((x) & 0xffffffff)
+#define hi32(x) (((x) >> 32) & 0xffffffff)
+
+/* WARNING: _a *MUST* be a power of two */
+#define ALIGN_UP(_v, _a) (((_v) + (_a) - 1) & ~((_a) - 1))
+#define ALIGN_DOWN(_v, _a) ((_v) & ~((_a) - 1))
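+
+/* For instance, ALIGN_UP(0x1234, 0x1000) == 0x2000 and
+ * ALIGN_DOWN(0x1234, 0x1000) == 0x1000. */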
+
+/* TCE alignment */
+#define TCE_PSIZE 0x1000
+
+/* Not the greatest variants but will do for now ... */
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* Clean the stray high bit which the FSP inserts: we only have 52 real bits */
+static inline u64 cleanup_addr(u64 addr)
+{
+ return addr & ((1ULL << 52) - 1);
+}
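+
+/* E.g. cleanup_addr(0xfff0000012345678ULL) == 0x0000000012345678ULL:
+ * the top 12 bits are cleared, keeping the 52 implemented bits. */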
+
+/* Start the kernel */
+extern void start_kernel(uint64_t entry, void* fdt,
+ uint64_t mem_top) __noreturn;
+extern void start_kernel32(uint64_t entry, void* fdt,
+ uint64_t mem_top) __noreturn;
+extern void start_kernel_secondary(uint64_t entry) __noreturn;
+
+/* Get description of machine from HDAT and create device-tree */
+extern void parse_hdat(bool is_opal, uint32_t master_cpu);
+
+/* Root of device tree. */
+extern struct dt_node *dt_root;
+
+/* Generated git id. */
+extern const char gitid[];
+
+/* Fast reboot support */
+extern void fast_reset(void);
+extern void __secondary_cpu_entry(void);
+extern void load_and_boot_kernel(bool is_reboot);
+extern void cleanup_tlb(void);
+extern void init_shared_sprs(void);
+extern void init_replicated_sprs(void);
+
+/* Various probe routines, to replace with an initcall system */
+extern void probe_p5ioc2(void);
+extern void probe_p7ioc(void);
+extern void probe_phb3(void);
+extern void uart_init(bool enable_interrupt);
+extern void homer_init(void);
+extern void add_cpu_idle_state_properties(void);
+extern void occ_pstates_init(void);
+extern void slw_init(void);
+extern void occ_fsp_init(void);
+
+/* NVRAM support */
+extern void nvram_init(void);
+extern void nvram_read_complete(bool success);
+
+/* NVRAM on flash helper */
+struct flash_chip;
+extern int flash_nvram_init(struct flash_chip *chip, uint32_t start,
+ uint32_t size);
+
+/* UART interrupt */
+extern void uart_irq(void);
+
+/* Flatten device-tree */
+extern void *create_dtb(const struct dt_node *root);
+
+/* SLW reinit function for switching core settings */
+extern int64_t slw_reinit(uint64_t flags);
+
+
+#endif /* __SKIBOOT_H */
+
diff --git a/include/spcn.h b/include/spcn.h
new file mode 100644
index 0000000..1945e8d
--- /dev/null
+++ b/include/spcn.h
@@ -0,0 +1,93 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ */
+
+
+#ifndef __SPCN_H
+#define __SPCN_H
+
+
+/* SPCN commands */
+#define SPCN_CMD_PRS 0x42 /* Power Resource Status */
+#define SPCN_CMD_SET 0x66 /* Set Environmental Thresholds */
+
+/* SPCN command address modes */
+#define SPCN_ADDR_MODE_CEC_NODE 0x0000d000 /* CEC node single destination */
+#define SPCN_ADDR_MODE_ALL_SLAVES 0x0000f000 /* Address all slaves in all racks */
+#define SPCN_ADDR_MODE_RACK_NODES 0x00000000 /* Address rack node in all racks */
+
+/* SPCN PRS command modifiers */
+#define SPCN_MOD_PRS_STATUS_FIRST 0x01 /* Power Resource Status (First 1KB) */
+#define SPCN_MOD_PRS_STATUS_SUBS 0x02 /* Subsequent set of 1KB PRS entries */
+#define SPCN_MOD_PRS_LED_DATA_FIRST 0x51 /* LED data entry (First 1KB) */
+#define SPCN_MOD_PRS_LED_DATA_SUB 0x52 /* Subsequent LED data entries */
+
+/* SPCN SET command modifiers */
+#define SPCN_MOD_SET_LED_CTL_LOC_CODE 0x07 /* Control LED with location code */
+#define SPCN_MOD_SET_IDENTIFY_OFF_ENC 0x08 /* Turn off identify LEDs in CEC */
+#define SPCN_MOD_SET_IDENTIFY_OFF_NODE 0x0B /* Turn off identify LEDs in Node */
+
+/* SPCN SENSOR command modifiers */
+#define SPCN_MOD_SENSOR_PARAM_FIRST 0x10 /* First 1K sensor parameters */
+#define SPCN_MOD_SENSOR_PARAM_SUBS 0x11 /* Subsequent sensor parameters */
+#define SPCN_MOD_SENSOR_DATA_FIRST 0x12 /* First 1K sensor data */
+#define SPCN_MOD_SENSOR_DATA_SUBS 0x13 /* Subsequent sensor data blocks */
+#define SPCN_MOD_PROC_JUNC_TEMP 0x14 /* Process junction temperatures */
+#define SPCN_MOD_SENSOR_POWER 0x1c /* System power consumption */
+#define SPCN_MOD_LAST 0xff
+
+/*
+ * Modifiers 0x53 and 0x54 are used by LEDs at standby, so the HV does not
+ * come into the picture here. Do we need those?
+ */
+
+/* Supported SPCN response codes */
+#define LOGICAL_IND_STATE_MASK 0x10 /* If set, control fault state */
+#define ACTIVE_LED_STATE_MASK 0x01 /* If set, switch on the LED */
+#define SPCN_LED_IDENTIFY_MASK 0x80 /* Set identify indicator */
+#define SPCN_LED_FAULT_MASK 0x40 /* Set fault indicator */
+#define SPCN_LED_TRANS_MASK 0x20 /* LED is in transition */
+#define SPCN_CLR_LED_STATE 0x00 /* Reset identify indicator */
+
+/* SPCN command response status codes */
+enum spcn_rsp_status {
+ SPCN_RSP_STATUS_SUCCESS = 0x01, /* Command successful */
+ SPCN_RSP_STATUS_COND_SUCCESS = 0x02, /* Command successful, but additional entries exist */
+ SPCN_RSP_STATUS_INVALID_RACK = 0x15, /* Invalid rack command */
+ SPCN_RSP_STATUS_INVALID_SLAVE = 0x16, /* Invalid slave command */
+ SPCN_RSP_STATUS_INVALID_MOD = 0x18, /* Invalid modifier */
+ SPCN_RSP_STATUS_STATE_PROHIBIT = 0x21, /* Present state prohibits */
+ SPCN_RSP_STATUS_UNKNOWN = 0xff, /* Default state */
+};
+
+/* Sensor FRCs (Frame resource class) */
+enum {
+ SENSOR_FRC_POWER_CTRL = 0x02,
+ SENSOR_FRC_POWER_SUPPLY,
+ SENSOR_FRC_REGULATOR,
+ SENSOR_FRC_COOLING_FAN,
+ SENSOR_FRC_COOLING_CTRL,
+ SENSOR_FRC_BATTERY_CHRG,
+ SENSOR_FRC_BATTERY_PACK,
+ SENSOR_FRC_AMB_TEMP,
+ SENSOR_FRC_TEMP,
+ SENSOR_FRC_VRM,
+ SENSOR_FRC_RISER_CARD,
+ SENSOR_FRC_IO_BP,
+};
+
+#endif /* __SPCN_H */
diff --git a/include/stack.h b/include/stack.h
new file mode 100644
index 0000000..6eedc01
--- /dev/null
+++ b/include/stack.h
@@ -0,0 +1,92 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __STACKFRAME_H
+#define __STACKFRAME_H
+
+#define STACK_ENTRY_OPAL_API 0 /* OPAL call */
+#define STACK_ENTRY_MCHECK 0x0200 /* Machine check */
+#define STACK_ENTRY_HMI		0x0e60	/* Hypervisor maintenance */
+#define STACK_ENTRY_RESET 0x0100 /* System reset */
+#define STACK_ENTRY_SOFTPATCH 0x1500 /* Soft patch (denorm emulation) */
+
+/* Portion of the stack reserved for machine checks */
+#define MC_STACK_SIZE 0x1000
+
+/* Safety/ABI gap at top of stack */
+#define STACK_TOP_GAP 0x100
+
+/* Remaining stack space (gap included) */
+#define NORMAL_STACK_SIZE (STACK_SIZE - MC_STACK_SIZE)
+
+/* Offset to get to normal CPU stacks */
+#define CPU_STACKS_OFFSET (CPU_STACKS_BASE + \
+ NORMAL_STACK_SIZE - STACK_TOP_GAP)
+
+/* Offset to get to machine check CPU stacks */
+#define CPU_MC_STACKS_OFFSET (CPU_STACKS_BASE + STACK_SIZE - STACK_TOP_GAP)
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+
+/* This is the struct used to save GPRs etc.. on OPAL entry
+ * and from some exceptions. It is not always entirely populated
+ * depending on the entry type
+ */
+struct stack_frame {
+ /* Standard 112-byte stack frame header (the minimum size required,
+ * using an 8-doubleword param save area). The callee (in C) may use
+ * lrsave; we declare these here so we don't get our own save area
+ * overwritten */
+ uint64_t backchain;
+ uint64_t crsave;
+ uint64_t lrsave;
+ uint64_t compiler_dw;
+ uint64_t linker_dw;
+ uint64_t tocsave;
+ uint64_t paramsave[8];
+
+ /* Space for stack-local vars used by asm. At present we only use
+ * one doubleword. */
+ uint64_t locals[1];
+
+ /* Entry type */
+ uint64_t type;
+
+ /* GPR save area
+ *
+ * We don't necessarily save everything in here
+ */
+ uint64_t gpr[32];
+
+ /* Other SPR saved
+ *
+ * Only for some exceptions.
+ */
+ uint32_t cr;
+ uint32_t xer;
+ uint64_t ctr;
+ uint64_t lr;
+ uint64_t pc;
+ uint64_t cfar;
+ uint64_t srr0;
+ uint64_t srr1;
+} __attribute__((aligned(16)));
+
+#endif /* __ASSEMBLY__ */
+#endif /* __STACKFRAME_H */
+
diff --git a/include/timebase.h b/include/timebase.h
new file mode 100644
index 0000000..11ab126
--- /dev/null
+++ b/include/timebase.h
@@ -0,0 +1,91 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Timebase helpers.
+ *
+ * Note: Only use after the TODs are in sync !
+ */
+#ifndef __TIME_H
+#define __TIME_H
+
+#include <time.h>
+
+static inline unsigned long mftb(void)
+{
+ unsigned long tb;
+
+ /* We use a memory clobber to avoid this being
+ * moved in the instruction stream
+ */
+ asm volatile("mftb %0" : "=r"(tb) : : "memory");
+ return tb;
+}
+
+enum tb_cmpval {
+ TB_ABEFOREB = -1,
+ TB_AEQUALB = 0,
+ TB_AAFTERB = 1
+};
+
+static inline enum tb_cmpval tb_compare(unsigned long a,
+ unsigned long b)
+{
+ if (a == b)
+ return TB_AEQUALB;
+ return ((long)(b - a)) > 0 ? TB_ABEFOREB : TB_AAFTERB;
+}
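+
+/* E.g. with a = 100 and b = 200, (long)(b - a) > 0, so tb_compare()
+ * returns TB_ABEFOREB; using the signed difference keeps the comparison
+ * correct across timebase wraparound. */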
+
+/* Architected timebase */
+static const unsigned long tb_hz = 512000000;
+
+static inline unsigned long secs_to_tb(unsigned long secs)
+{
+ return secs * tb_hz;
+}
+
+static inline unsigned long msecs_to_tb(unsigned long msecs)
+{
+ return msecs * (tb_hz / 1000);
+}
+
+static inline unsigned long tb_to_msecs(unsigned long tb)
+{
+ return (tb * 1000) / tb_hz;
+}
+
+static inline unsigned long usecs_to_tb(unsigned long usecs)
+{
+ return usecs * (tb_hz / 1000000);
+}
+
+static inline unsigned long tb_to_usecs(unsigned long tb)
+{
+ return (tb * 1000000) / tb_hz;
+}
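+
+/*
+ * Worked example: with the architected 512MHz timebase, a 10ms delay
+ * corresponds to msecs_to_tb(10) == 10 * 512000 == 5120000 ticks, and
+ * tb_to_msecs(5120000) recovers the original 10.
+ */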
+
+extern unsigned long timespec_to_tb(const struct timespec *ts);
+
+/* time_wait - Wait a certain number of TB ticks while polling FSP */
+extern void time_wait(unsigned long duration);
+
+/* time_wait_ms - Wait a certain number of milliseconds while polling FSP */
+extern void time_wait_ms(unsigned long ms);
+
+/* time_wait_us - Wait a certain number of microseconds while polling FSP */
+extern void time_wait_us(unsigned long us);
+
+#endif /* __TIME_H */
diff --git a/include/trace.h b/include/trace.h
new file mode 100644
index 0000000..da43572
--- /dev/null
+++ b/include/trace.h
@@ -0,0 +1,46 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TRACE_H
+#define __TRACE_H
+#include <ccan/short_types/short_types.h>
+#include <stddef.h>
+#include <lock.h>
+#include <trace_types.h>
+
+#define TBUF_SZ (1024 * 1024)
+
+struct cpu_thread;
+
+/* Here's one we prepared earlier. */
+void init_boot_tracebuf(struct cpu_thread *boot_cpu);
+
+struct trace_info {
+ /* Lock for writers. */
+ struct lock lock;
+ /* Exposed to kernel. */
+ struct tracebuf tb;
+};
+
+/* Allocate trace buffers once we know memory topology */
+void init_trace_buffers(void);
+
+/* This will fill in timestamp and cpu; you must do type and len. */
+void trace_add(union trace *trace, u8 type, u16 len);
+
+/* Put trace node into dt. */
+void trace_add_node(void);
+#endif /* __TRACE_H */
diff --git a/include/trace_types.h b/include/trace_types.h
new file mode 100644
index 0000000..d1d3690
--- /dev/null
+++ b/include/trace_types.h
@@ -0,0 +1,132 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* API for kernel to read trace buffer. */
+#ifndef __TRACE_TYPES_H
+#define __TRACE_TYPES_H
+
+#define TRACE_REPEAT 1
+#define TRACE_OVERFLOW 2
+#define TRACE_OPAL 3 /* OPAL call */
+#define TRACE_FSP_MSG 4 /* FSP message sent/received */
+#define TRACE_FSP_EVENT 5 /* FSP driver event */
+#define TRACE_UART 6 /* UART driver traces */
+
+/* One per cpu, plus one for NMIs */
+struct tracebuf {
+ /* Mask to apply to get buffer offset. */
+ u64 mask;
+	/* This is where the buffer starts. */
+ u64 start;
+ /* This is where writer has written to. */
+ u64 end;
+ /* This is where the writer wrote to previously. */
+ u64 last;
+ /* This is where the reader is up to. */
+ u64 rpos;
+ /* If the last one we read was a repeat, this shows how many. */
+ u32 last_repeat;
+ /* Maximum possible size of a record. */
+ u32 max_size;
+
+ char buf[/* TBUF_SZ + max_size */];
+};
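+
+/*
+ * Illustrative reader-side sketch (assumed consumer code, not part of
+ * this header): a record starts at buf[tb->rpos & tb->mask], and the
+ * buffer is non-empty while tb->rpos != tb->end.
+ */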
+
+/* Common header for all trace entries. */
+struct trace_hdr {
+ u64 timestamp;
+ u8 type;
+ u8 len_div_8;
+ u16 cpu;
+ u8 unused[4];
+};
+
+/* Note: all other entries must be at least as large as this! */
+struct trace_repeat {
+ u64 timestamp; /* Last repeat happened at this timestamp */
+ u8 type; /* == TRACE_REPEAT */
+ u8 len_div_8;
+ u16 cpu;
+ u16 prev_len;
+ u16 num; /* Starts at 1, ie. 1 repeat, or two traces. */
+ /* Note that the count can be one short, if read races a repeat. */
+};
+
+/* Overflow is special */
+struct trace_overflow {
+ u64 unused64; /* Timestamp is unused */
+ u8 type; /* == TRACE_OVERFLOW */
+ u8 len_div_8;
+ u8 unused[6]; /* ie. hdr.cpu is indeterminate */
+ u64 bytes_missed;
+};
+
+/* All other trace types have a full header */
+struct trace_opal {
+ struct trace_hdr hdr;
+ u64 token, lr, sp, r3_to_11[9];
+};
+
+#define TRACE_FSP_MSG_IN 0
+#define TRACE_FSP_MSG_OUT 1
+
+struct trace_fsp_msg {
+ struct trace_hdr hdr;
+ u32 word0;
+ u32 word1;
+ u8 dlen;
+ u8 dir; /* TRACE_FSP_MSG_IN or TRACE_FSP_MSG_OUT */
+ u8 data[56]; /* See dlen, but max is 56 bytes. */
+};
+
+#define TRACE_FSP_EVT_LINK_DOWN 0
+#define TRACE_FSP_EVT_DISR_CHG 1 /* 0:disr */
+#define TRACE_FSP_EVT_SOFT_RR 2 /* 0:disr */
+#define TRACE_FSP_EVT_RR_COMPL 3
+#define TRACE_FSP_EVT_HDES_CHG 4 /* 0:hdes */
+#define TRACE_FSP_EVT_POLL_IRQ 5 /* 0:irq? 1:hdir 2:ctl 3:psi_irq */
+
+struct trace_fsp_event {
+ struct trace_hdr hdr;
+ u16 event;
+ u16 fsp_state;
+ u32 data[4]; /* event type specific */
+};
+
+#define TRACE_UART_CTX_IRQ 0
+#define TRACE_UART_CTX_POLL 1
+#define TRACE_UART_CTX_READ 2
+
+struct trace_uart {
+ struct trace_hdr hdr;
+ u8 ctx;
+ u8 cnt;
+ u8 irq_state;
+ u8 unused;
+ u16 in_count;
+};
+
+union trace {
+ struct trace_hdr hdr;
+ /* Trace types go here... */
+ struct trace_repeat repeat;
+ struct trace_overflow overflow;
+ struct trace_opal opal;
+ struct trace_fsp_msg fsp_msg;
+ struct trace_fsp_event fsp_evt;
+ struct trace_uart uart;
+};
+
+#endif /* __TRACE_TYPES_H */
diff --git a/include/types.h b/include/types.h
new file mode 100644
index 0000000..36dc81d
--- /dev/null
+++ b/include/types.h
@@ -0,0 +1,27 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TYPES_H
+#define __TYPES_H
+#include <ccan/short_types/short_types.h>
+
+/* These are currently just for clarity, but we could apply sparse. */
+typedef u16 __be16;
+typedef u32 __be32;
+typedef u64 __be64;
+
+#endif /* __TYPES_H */
+
diff --git a/include/vpd.h b/include/vpd.h
new file mode 100644
index 0000000..c49ab71
--- /dev/null
+++ b/include/vpd.h
@@ -0,0 +1,45 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VPD_H
+#define __VPD_H
+
+struct machine_info {
+ const char *mtm;
+ const char *name;
+};
+
+const struct machine_info *machine_info_lookup(char *mtm);
+
+const void *vpd_find_keyword(const void *rec, size_t rec_sz,
+ const char *kw, uint8_t *kw_size);
+
+const void *vpd_find_record(const void *vpd, size_t vpd_size,
+ const char *record, size_t *sz);
+
+const void *vpd_find(const void *vpd, size_t vpd_size,
+ const char *record, const char *keyword,
+ uint8_t *sz);
+
+/* Add model property to dt_root */
+void add_dtb_model(void);
+
+void vpd_iohub_load(struct dt_node *hub_node);
+
+#define VPD_LOAD_LXRN_VINI 0xff
+
+
+#endif /* __VPD_H */
diff --git a/include/xscom.h b/include/xscom.h
new file mode 100644
index 0000000..2b09a86
--- /dev/null
+++ b/include/xscom.h
@@ -0,0 +1,171 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __XSCOM_H
+#define __XSCOM_H
+
+#include <stdint.h>
+#include <processor.h>
+#include <cpu.h>
+
+/*
+ * SCOM "partID" definitions:
+ *
+ * All Ids are 32-bits long, top nibble is reserved for the
+ * 'type' field:
+ * 0x0 = Processor Chip
+ * 0x8 = Memory Buffer (Centaur) Chip
+ * 0x4 = EX/Core Chiplet
+ *
+ * Processor Chip = Logical Fabric Id = PIR>>7
+ * 0b0000.0000.0000.0000.0000.0000.00NN.NCCC
+ * N=Node, C=Chip
+ * Centaur Chip = Associated Processor Chip with memory channel
+ * appended and flag set
+ * 0b1000.0000.0000.0000.0000.00NN.NCCC.MMMM
+ * N=Node, C=Chip, M=Memory Channel
+ * Processor EX/Core chiplet = PIR >> 3 with flag set
+ * 0b0100.0000.0000.0000.0000.00NN.NCCC.PPPP
+ * N=Node, C=Chip, P=Processor core
+ */
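+
+/*
+ * Illustrative helper (assumed, not part of this header), deriving the
+ * processor chip partID from a PIR value per the encoding above:
+ *
+ *	static inline uint32_t pir_to_chip_partid(uint32_t pir)
+ *	{
+ *		return pir >> 7;	// Logical Fabric Id = PIR>>7
+ *	}
+ */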
+
+/*
+ * SCOM Address definition extracted from HWPs for documentation
+ * purposes
+ *
+ * "Normal" (legacy) format
+ *
+ * 111111 11112222 22222233 33333333 44444444 44555555 55556666
+ * 01234567 89012345 67890123 45678901 23456789 01234567 89012345 67890123
+ * -------- -------- -------- -------- -------- -------- -------- --------
+ * 00000000 00000000 00000000 00000000 0MCCCCCC ????PPPP 00LLLLLL LLLLLLLL
+ * || | |
+ * || | `-> Local Address*
+ * || |
+ * || `-> Port
+ * ||
+ * |`-> Chiplet ID**
+ * |
+ * `-> Multicast bit
+ *
+ * * Local address is composed of "00" + 4-bit ring + 10-bit ID
+ * The 10-bit ID is usually 4-bit sat_id and 6-bit reg_id
+ *
+ * ** Chiplet ID turns into multicast operation type and group number
+ * if the multicast bit is set
+ *
+ * "Indirect" format
+ *
+ *
+ * 111111 11112222 22222233 33333333 44444444 44555555 55556666
+ * 01234567 89012345 67890123 45678901 23456789 01234567 89012345 67890123
+ * -------- -------- -------- -------- -------- -------- -------- --------
+ * 10000000 0000IIII IIIIIGGG GGGLLLLL 0MCCCCCC ????PPPP 00LLLLLL LLLLLLLL
+ * | | | || | |
+ * | | | || | `-> Local Address*
+ * | | | || |
+ * | | | || `-> Port
+ * | | | ||
+ * | | | |`-> Chiplet ID**
+ * | | | |
+ * | | | `-> Multicast bit
+ * | | |
+ * | | `-> Lane ID
+ * | |
+ * | `-> RX or TX Group ID
+ * |
+ * `-> Indirect Register Address
+ *
+ * * Local address is composed of "00" + 4-bit ring + 4-bit sat_id + "111111"
+ *
+ * ** Chiplet ID turns into multicast operation type and group number
+ * if the multicast bit is set
+ */
+
+/*
+ * Generate a local address from a given ring/satellite/offset
+ * combination:
+ *
+ *      Ring   Satellite offset
+ * +---------+---------+-------------+
+ * | 4 | 4 | 6 |
+ * +---------+---------+-------------+
+ */
+#define XSCOM_SAT(_r, _s, _o) \
+ (((_r) << 10) | ((_s) << 6) | (_o))
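+
+/* For example, ring 0x3, satellite 0x1, offset 0x2 gives
+ * XSCOM_SAT(0x3, 0x1, 0x2) == (0x3 << 10) | (0x1 << 6) | 0x2 == 0xc42. */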
+
+/*
+ * Additional useful definitions
+ */
+#define P8_EX_PCB_SLAVE_BASE 0x100F0000
+
+#define XSCOM_ADDR_P8_EX_SLAVE(core, offset) \
+ (P8_EX_PCB_SLAVE_BASE | (((core) & 0xF) << 24) | ((offset) & 0xFFFF))
+
+#define XSCOM_ADDR_P8_EX(core, addr) \
+ ((((core) & 0xF) << 24) | (addr))
+
+/* Per core power mgt registers */
+#define PM_OHA_MODE_REG 0x1002000D
+
+/* EX slave per-core power mgt slave registers */
+#define EX_PM_GP0 0x0100
+#define EX_PM_GP1 0x0103
+#define EX_PM_SPECIAL_WAKEUP_FSP 0x010B
+#define EX_PM_SPECIAL_WAKEUP_OCC 0x010C
+#define EX_PM_SPECIAL_WAKEUP_PHYP 0x010D
+#define EX_PM_IDLE_STATE_HISTORY_PHYP 0x0110
+#define EX_PM_IDLE_STATE_HISTORY_FSP 0x0111
+#define EX_PM_IDLE_STATE_HISTORY_OCC 0x0112
+#define EX_PM_IDLE_STATE_HISTORY_PERF 0x0113
+#define EX_PM_CORE_PFET_VRET 0x0130
+#define EX_PM_CORE_ECO_VRET 0x0150
+#define EX_PM_PPMSR 0x0153
+#define EX_PM_PPMCR 0x0159
+
+/* Power mgt bits in GP0 */
+#define EX_PM_GP0_SPECIAL_WAKEUP_DONE PPC_BIT(31)
+
+/* Power mgt settings in GP1 */
+#define EX_PM_SETUP_GP1_FAST_SLEEP 0xD820000000000000ULL
+#define EX_PM_SETUP_GP1_DEEP_SLEEP 0x2420000000000000ULL
+#define EX_PM_SETUP_GP1_PM_SPR_OVERRIDE_EN 0x0010000000000000ULL
+#define EX_PM_SETUP_GP1_DPLL_FREQ_OVERRIDE_EN 0x0020000000000000ULL
+
+/* Fields in history regs */
+#define EX_PM_IDLE_ST_HIST_PM_STATE_MASK PPC_BITMASK(0, 2)
+#define EX_PM_IDLE_ST_HIST_PM_STATE_LSH PPC_BITLSHIFT(2)
+
+
+/*
+ * Error handling:
+ *
+ * Error codes TBD, 0 = success
+ */
+
+/* Targeted SCOM access */
+extern int xscom_read(uint32_t partid, uint64_t pcb_addr, uint64_t *val);
+extern int xscom_write(uint32_t partid, uint64_t pcb_addr, uint64_t val);
+
+/* This chip SCOM access */
+extern int xscom_readme(uint64_t pcb_addr, uint64_t *val);
+extern int xscom_writeme(uint64_t pcb_addr, uint64_t val);
+extern void xscom_init(void);
+
+/* Mark XSCOM lock as being in console path */
+extern void xscom_used_by_console(void);
+
+#endif /* __XSCOM_H */
diff --git a/libc/Makefile.inc b/libc/Makefile.inc
new file mode 100644
index 0000000..7503f09
--- /dev/null
+++ b/libc/Makefile.inc
@@ -0,0 +1,12 @@
+LIBCDIR = libc
+
+SUBDIRS += $(LIBCDIR)
+LIBC = $(LIBCDIR)/built-in.o
+
+include $(SRC)/$(LIBCDIR)/string/Makefile.inc
+include $(SRC)/$(LIBCDIR)/ctype/Makefile.inc
+include $(SRC)/$(LIBCDIR)/stdlib/Makefile.inc
+include $(SRC)/$(LIBCDIR)/stdio/Makefile.inc
+
+$(LIBC): $(STRING) $(CTYPE) $(STDLIB) $(STDIO)
+
diff --git a/libc/README.txt b/libc/README.txt
new file mode 100644
index 0000000..d6c3eb2
--- /dev/null
+++ b/libc/README.txt
@@ -0,0 +1,62 @@
+
+skiboot changes:
+
+ - Added malloc lock
+ - Added a few functions
+ - Malloc will always 4-byte align everything
+ - Added asserts, requires core to provide abort()
+
+WARNINGS:
+
+ - don't use free() on the result of memalign() (should be fixable)
+
+Previous notes:
+
+ Standard C library for the SLOF firmware project
+ ================================================
+
+To use this library, link your target against the "libc.a" archive.
+
+However, there are some prerequisites before you can use certain parts of the
+library:
+
+1) If you want to use malloc() and the like, you have to supply an implemen-
+ tation of sbrk() in your own code. malloc() uses sbrk() to get new, free
+ memory regions.
+
+ Prototype: void *sbrk(int incr);
+ Description: sbrk() increments the available data space by incr bytes and
+ returns a pointer to the start of the new area.
+
+ See the man-page of sbrk for details about this function.
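+
+   A minimal sketch (illustrative only, backed by a fixed static arena;
+   the arena size is an assumption):
+
+       void *sbrk(int incr)
+       {
+               static char heap[0x10000];
+               static int heap_used;
+               char *prev = heap + heap_used;
+
+               if (heap_used + incr < 0 || heap_used + incr > (int)sizeof(heap))
+                       return (void *)-1;    /* out of arena space */
+               heap_used += incr;
+               return prev;                  /* previous break, per sbrk() */
+       }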
+
+2) Before you can use the stdio output functions like printf(), puts() and the
+ like, you have to provide a standard write() function in your code.
+ printf() and the like use write() to print out the strings to the standard
+ output.
+
+ Prototype: ssize_t write(int fd, const void *buf, size_t cnt);
+   Description: Write cnt bytes from the buffer buf to the stream associated
+ with the file descriptor fd.
+
+   The stdio functions will print their output to the stdout channel, which is
+   assigned file descriptor 1 by default. Note that the stdio
+   functions will not use open() before calling write(), so if the stdout
+   channel needs to be opened first, you should do that in your start-up code
+ before using the libc functions for the first time.
+
+3) Before you can use the stdio input functions like scanf() and the
+ like, you have to provide a standard read() function in your code.
+ scanf() and the like use read() to get the characters from the standard
+ input.
+
+ Prototype: ssize_t read(int fd, void *buf, size_t cnt);
+   Description: Read cnt bytes from the stream associated with the file
+ descriptor fd and put them into the buffer buf.
+
+   The stdio functions will get their input from the stdin channel, which is
+   assigned file descriptor 0 by default. Note that the stdio
+   functions will not use open() before calling read(), so if the stdin
+   channel needs to be opened first, you should do that in your start-up code
+ before using the libc functions for the first time.
+
diff --git a/libc/ctype/Makefile.inc b/libc/ctype/Makefile.inc
new file mode 100644
index 0000000..da78c98
--- /dev/null
+++ b/libc/ctype/Makefile.inc
@@ -0,0 +1,19 @@
+# *****************************************************************************
+# * Copyright (c) 2004, 2008 IBM Corporation
+# * All rights reserved.
+# * This program and the accompanying materials
+# * are made available under the terms of the BSD License
+# * which accompanies this distribution, and is available at
+# * http://www.opensource.org/licenses/bsd-license.php
+# *
+# * Contributors:
+# * IBM Corporation - initial implementation
+# ****************************************************************************/
+
+SUBDIRS += $(LIBCDIR)/ctype
+
+CTYPE_OBJS = isdigit.o isprint.o isspace.o isxdigit.o tolower.o toupper.o
+
+CTYPE = $(LIBCDIR)/ctype/built-in.o
+$(CTYPE): $(CTYPE_OBJS:%=$(LIBCDIR)/ctype/%)
+
diff --git a/libc/ctype/isdigit.c b/libc/ctype/isdigit.c
new file mode 100644
index 0000000..68cd4eb
--- /dev/null
+++ b/libc/ctype/isdigit.c
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst isdigit(int ch)
+{
+ switch (ch) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
diff --git a/libc/ctype/isprint.c b/libc/ctype/isprint.c
new file mode 100644
index 0000000..0a7c94c
--- /dev/null
+++ b/libc/ctype/isprint.c
@@ -0,0 +1,19 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst isprint(int ch)
+{
+ return (ch >= 32 && ch < 127);
+}
diff --git a/libc/ctype/isspace.c b/libc/ctype/isspace.c
new file mode 100644
index 0000000..f9fa36a
--- /dev/null
+++ b/libc/ctype/isspace.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst isspace(int ch)
+{
+ switch (ch) {
+ case ' ':
+ case '\f':
+ case '\n':
+ case '\r':
+ case '\t':
+ case '\v':
+ return 1;
+
+ default:
+ return 0;
+ }
+}
diff --git a/libc/ctype/isxdigit.c b/libc/ctype/isxdigit.c
new file mode 100644
index 0000000..d3c7388
--- /dev/null
+++ b/libc/ctype/isxdigit.c
@@ -0,0 +1,22 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst isxdigit(int ch)
+{
+	return (
+		(ch >= '0' && ch <= '9') ||
+		(ch >= 'A' && ch <= 'F') ||
+		(ch >= 'a' && ch <= 'f'));
+}
diff --git a/libc/ctype/tolower.c b/libc/ctype/tolower.c
new file mode 100644
index 0000000..398a1eb
--- /dev/null
+++ b/libc/ctype/tolower.c
@@ -0,0 +1,19 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst tolower(int c)
+{
+ return (((c >= 'A') && (c <= 'Z')) ? (c - 'A' + 'a' ) : c);
+}
diff --git a/libc/ctype/toupper.c b/libc/ctype/toupper.c
new file mode 100644
index 0000000..6b52363
--- /dev/null
+++ b/libc/ctype/toupper.c
@@ -0,0 +1,21 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <compiler.h>
+#include <ctype.h>
+
+int __attrconst toupper(int c)
+{
+	if ((c >= 'a') && (c <= 'z'))
+		return c - 'a' + 'A';
+	return c;
+}
diff --git a/libc/include/assert.h b/libc/include/assert.h
new file mode 100644
index 0000000..755fc71
--- /dev/null
+++ b/libc/include/assert.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008, 2012 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _ASSERT_H
+#define _ASSERT_H
+
+#define assert(cond) \
+ do { if (!(cond)) \
+ assert_fail(__FILE__ \
+ ":" stringify(__LINE__) \
+ ":" stringify(cond)); \
+ } while(0)
+
+void __attribute__((noreturn)) assert_fail(const char *msg);
+
+#define stringify(expr) stringify_1(expr)
+/* Double-indirection required to stringify expansions */
+#define stringify_1(expr) #expr
+
+#endif
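
For reference, the assert() macro above expands entirely at the call
site. A minimal sketch of the expansion, where the file name and line
number are illustrative stand-ins for __FILE__ and __LINE__:

	assert(x > 0);

	/* for a call at line 42 of foo.c this becomes, once the
	 * compiler concatenates the adjacent string literals: */
	do {
		if (!(x > 0))
			assert_fail("foo.c" ":" "42" ":" "x > 0");
	} while (0);
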
diff --git a/libc/include/ctype.h b/libc/include/ctype.h
new file mode 100644
index 0000000..9051a75
--- /dev/null
+++ b/libc/include/ctype.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _CTYPE_H
+#define _CTYPE_H
+
+int isdigit(int c);
+int isxdigit(int c);
+int isprint(int c);
+int isspace(int c);
+
+int tolower(int c);
+int toupper(int c);
+
+#endif
diff --git a/libc/include/errno.h b/libc/include/errno.h
new file mode 100644
index 0000000..d585934
--- /dev/null
+++ b/libc/include/errno.h
@@ -0,0 +1,34 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _ERRNO_H
+#define _ERRNO_H
+
+extern int errno;
+
+/*
+ * Error number definitions
+ */
+#define EPERM 1 /* not permitted */
+#define ENOENT 2 /* file or directory not found */
+#define EIO 5 /* input/output error */
+#define ENOMEM 12 /* not enough space */
+#define EACCES 13 /* permission denied */
+#define EFAULT 14 /* bad address */
+#define EBUSY 16 /* resource busy */
+#define EEXIST 17 /* file already exists */
+#define ENODEV 19 /* device not found */
+#define EINVAL 22 /* invalid argument */
+#define EDOM 33 /* math argument out of domain of func */
+#define ERANGE 34 /* math result not representable */
+
+#endif
diff --git a/libc/include/getopt.h b/libc/include/getopt.h
new file mode 100644
index 0000000..5956986
--- /dev/null
+++ b/libc/include/getopt.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef GETOPT_H
+#define GETOPT_H
+
+extern char *optarg;
+extern int optind;
+extern int opterr;
+extern int optopt;
+
+struct option {
+ const char *name;
+ int has_arg;
+ int *flag;
+ int val;
+};
+
+enum {
+ no_argument = 0,
+ required_argument,
+ optional_argument
+};
+
+int getopt(int argc, char **, const char *);
+int getopt_long(int argc, char **, const char *, const struct option *, int *);
+
+#endif /* GETOPT_H */
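
A minimal sketch of driving the interface declared above, assuming the
usual POSIX convention that getopt() returns -1 once the options are
exhausted; the option letters are hypothetical:

	#include <getopt.h>
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		int c;

		/* "v" takes no argument; the ':' makes "o" require one */
		while ((c = getopt(argc, argv, "vo:")) != -1) {
			switch (c) {
			case 'v':
				puts("verbose");
				break;
			case 'o':
				printf("output file: %s\n", optarg);
				break;
			default:
				return 1;
			}
		}
		return 0;
	}
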
diff --git a/libc/include/limits.h b/libc/include/limits.h
new file mode 100644
index 0000000..d7f8d1a
--- /dev/null
+++ b/libc/include/limits.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _LIMITS_H
+#define _LIMITS_H
+
+#define UCHAR_MAX 255
+#define SCHAR_MAX 127
+#define SCHAR_MIN (-128)
+
+#define USHRT_MAX 65535
+#define SHRT_MAX 32767
+#define SHRT_MIN (-32768)
+
+#define UINT_MAX (4294967295U)
+#define INT_MAX 2147483647
+#define INT_MIN (-INT_MAX - 1)
+
+#define ULONG_MAX ((unsigned long)-1L)
+#define LONG_MAX (ULONG_MAX/2)
+#define LONG_MIN ((-LONG_MAX)-1)
+
+#define CHAR_BIT 8
+#endif
diff --git a/libc/include/stdint.h b/libc/include/stdint.h
new file mode 100644
index 0000000..2a2c1d9
--- /dev/null
+++ b/libc/include/stdint.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _STDINT_H
+#define _STDINT_H
+
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+
+typedef unsigned long int uintptr_t;
+
+#endif
diff --git a/libc/include/stdio.h b/libc/include/stdio.h
new file mode 100644
index 0000000..57d655a
--- /dev/null
+++ b/libc/include/stdio.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _STDIO_H
+#define _STDIO_H
+
+#include <stdarg.h>
+#include "stddef.h"
+
+#define EOF (-1)
+
+#define _IONBF 0
+#define _IOLBF 1
+#define _IOFBF 2
+#define BUFSIZ 80
+
+typedef struct {
+ int fd;
+ int mode;
+ int pos;
+ char *buf;
+ int bufsiz;
+} FILE;
+
+extern FILE stdin_data;
+extern FILE stdout_data;
+extern FILE stderr_data;
+
+#define stdin (&stdin_data)
+#define stdout (&stdout_data)
+#define stderr (&stderr_data)
+
+int fileno(FILE *stream);
+int printf(const char *format, ...) __attribute__((format (printf, 1, 2)));
+int fprintf(FILE *stream, const char *format, ...) __attribute__((format (printf, 2, 3)));
+int sprintf(char *str, const char *format, ...) __attribute__((format (printf, 2, 3)));
+int snprintf(char *str, size_t size, const char *format, ...) __attribute__((format (printf, 3, 4)));
+int vfprintf(FILE *stream, const char *format, va_list);
+int vsprintf(char *str, const char *format, va_list);
+int vsnprintf(char *str, size_t size, const char *format, va_list);
+void setbuf(FILE *stream, char *buf);
+int setvbuf(FILE *stream, char *buf, int mode, size_t size);
+
+int fputc(int ch, FILE *stream);
+#define putc(ch, stream) fputc(ch, stream)
+int putchar(int ch);
+int puts(const char *str);
+int fputs(const char *str, FILE *stream);
+
+int scanf(const char *format, ...) __attribute__((format (scanf, 1, 2)));
+int fscanf(FILE *stream, const char *format, ...) __attribute__((format (scanf, 2, 3)));
+int vfscanf(FILE *stream, const char *format, va_list);
+int vsscanf(const char *str, const char *format, va_list);
+int getc(FILE *stream);
+int getchar(void);
+
+#endif
diff --git a/libc/include/stdlib.h b/libc/include/stdlib.h
new file mode 100644
index 0000000..4ab2825
--- /dev/null
+++ b/libc/include/stdlib.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _STDLIB_H
+#define _STDLIB_H
+
+#include "stddef.h"
+
+#define RAND_MAX 32767
+
+#include "mem_region-malloc.h"
+
+int atoi(const char *str);
+long atol(const char *str);
+unsigned long int strtoul(const char *nptr, char **endptr, int base);
+long int strtol(const char *nptr, char **endptr, int base);
+
+int rand(void);
+void __attribute__((noreturn)) abort(void);
+
+#endif
diff --git a/libc/include/string.h b/libc/include/string.h
new file mode 100644
index 0000000..96b26fa
--- /dev/null
+++ b/libc/include/string.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _STRING_H
+#define _STRING_H
+
+#include "stddef.h"
+
+char *strcpy(char *dest, const char *src);
+char *strncpy(char *dest, const char *src, size_t n);
+char *strcat(char *dest, const char *src);
+int strcmp(const char *s1, const char *s2);
+int strncmp(const char *s1, const char *s2, size_t n);
+int strcasecmp(const char *s1, const char *s2);
+int strncasecmp(const char *s1, const char *s2, size_t n);
+char *strchr(const char *s, int c);
+char *strrchr(const char *s, int c);
+size_t strlen(const char *s);
+char *strstr(const char *hay, const char *needle);
+char *strtok(char *src, const char *pattern);
+char *strdup(const char *src);
+
+void *memset(void *s, int c, size_t n);
+void *memchr(const void *s, int c, size_t n);
+void *memcpy(void *dest, const void *src, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
+int memcmp(const void *s1, const void *s2, size_t n);
+
+#endif
diff --git a/libc/include/time.h b/libc/include/time.h
new file mode 100644
index 0000000..807023b
--- /dev/null
+++ b/libc/include/time.h
@@ -0,0 +1,42 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+#ifndef _TIME_H
+#define _TIME_H
+
+struct tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+
+ /* unused in skiboot */
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+typedef long time_t;
+
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
+/* Implemented by the hosting environment rather than by this libc,
+ * but this is where the prototype is expected.
+ */
+int nanosleep(const struct timespec *req, struct timespec *rem);
+
+#endif /* _TIME_H */
+
diff --git a/libc/include/unistd.h b/libc/include/unistd.h
new file mode 100644
index 0000000..bc53472
--- /dev/null
+++ b/libc/include/unistd.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#ifndef _UNISTD_H
+#define _UNISTD_H
+
+#include <stddef.h>
+
+typedef long ssize_t;
+
+extern int open(const char *name, int flags);
+extern int close(int fd);
+extern ssize_t read(int fd, void *buf, size_t count);
+extern ssize_t write(int fd, const void *buf, size_t count);
+extern ssize_t lseek(int fd, long offset, int whence);
+
+#endif
diff --git a/libc/stdio/Makefile.inc b/libc/stdio/Makefile.inc
new file mode 100644
index 0000000..62276f1
--- /dev/null
+++ b/libc/stdio/Makefile.inc
@@ -0,0 +1,21 @@
+# *****************************************************************************
+# * Copyright (c) 2004, 2008 IBM Corporation
+# * All rights reserved.
+# * This program and the accompanying materials
+# * are made available under the terms of the BSD License
+# * which accompanies this distribution, and is available at
+# * http://www.opensource.org/licenses/bsd-license.php
+# *
+# * Contributors:
+# * IBM Corporation - initial implementation
+# ****************************************************************************/
+
+
+SUBDIRS += $(LIBCDIR)/stdio
+
+STDIO_OBJS = fscanf.o sprintf.o vfprintf.o vsnprintf.o vsprintf.o fprintf.o \
+ printf.o setvbuf.o fputc.o puts.o fputs.o putchar.o scanf.o \
+ stdchnls.o vfscanf.o vsscanf.o fileno.o snprintf.o
+
+STDIO = $(LIBCDIR)/stdio/built-in.o
+$(STDIO): $(STDIO_OBJS:%=$(LIBCDIR)/stdio/%)
diff --git a/libc/stdio/fileno.c b/libc/stdio/fileno.c
new file mode 100644
index 0000000..6e23951
--- /dev/null
+++ b/libc/stdio/fileno.c
@@ -0,0 +1,19 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+int
+fileno(FILE *stream)
+{
+ return stream->fd;
+}
diff --git a/libc/stdio/fprintf.c b/libc/stdio/fprintf.c
new file mode 100644
index 0000000..866df39
--- /dev/null
+++ b/libc/stdio/fprintf.c
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+
+
+int fprintf(FILE *stream, const char* fmt, ...)
+{
+ int count;
+ va_list ap;
+
+ va_start(ap, fmt);
+ count = vfprintf(stream, fmt, ap);
+ va_end(ap);
+
+ return count;
+}
diff --git a/libc/stdio/fputc.c b/libc/stdio/fputc.c
new file mode 100644
index 0000000..6e9e7c9
--- /dev/null
+++ b/libc/stdio/fputc.c
@@ -0,0 +1,24 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+#include "unistd.h"
+
+int fputc(int ch, FILE *stream)
+{
+ unsigned char outchar = ch;
+
+ if (write(stream->fd, &outchar, 1) == 1)
+ return outchar;
+ else
+ return EOF;
+}
diff --git a/libc/stdio/fputs.c b/libc/stdio/fputs.c
new file mode 100644
index 0000000..c22329c
--- /dev/null
+++ b/libc/stdio/fputs.c
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+
+#include "stdio.h"
+#include "string.h"
+#include "unistd.h"
+
+
+int fputs(const char *str, FILE *stream)
+{
+ return write(stream->fd, str, strlen(str));
+}
+
diff --git a/libc/stdio/fscanf.c b/libc/stdio/fscanf.c
new file mode 100644
index 0000000..321b163
--- /dev/null
+++ b/libc/stdio/fscanf.c
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+int
+fscanf(FILE *stream, const char *fmt, ...)
+{
+ int count;
+ va_list ap;
+
+ va_start(ap, fmt);
+ count = vfscanf(stream, fmt, ap);
+ va_end(ap);
+
+ return count;
+}
diff --git a/libc/stdio/printf.c b/libc/stdio/printf.c
new file mode 100644
index 0000000..01f4592
--- /dev/null
+++ b/libc/stdio/printf.c
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+
+
+int printf(const char* fmt, ...)
+{
+ int count;
+ va_list ap;
+
+ va_start(ap, fmt);
+ count = vfprintf(stdout, fmt, ap);
+ va_end(ap);
+
+ return count;
+}
+
diff --git a/libc/stdio/putchar.c b/libc/stdio/putchar.c
new file mode 100644
index 0000000..5c750d9
--- /dev/null
+++ b/libc/stdio/putchar.c
@@ -0,0 +1,21 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+
+#include "stdio.h"
+
+
+int
+putchar(int ch)
+{
+ return putc(ch, stdout);
+}
diff --git a/libc/stdio/puts.c b/libc/stdio/puts.c
new file mode 100644
index 0000000..85d9d31
--- /dev/null
+++ b/libc/stdio/puts.c
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+
+#include "stdio.h"
+#include "string.h"
+#include "unistd.h"
+
+
+int puts(const char *str)
+{
+ int ret;
+
+ ret = write(stdout->fd, str, strlen(str));
+ write(stdout->fd, "\n", 1);
+
+ return ret + 1;
+}
diff --git a/libc/stdio/scanf.c b/libc/stdio/scanf.c
new file mode 100644
index 0000000..96b6399
--- /dev/null
+++ b/libc/stdio/scanf.c
@@ -0,0 +1,26 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+int
+scanf(const char *fmt, ...)
+{
+ int count;
+ va_list ap;
+
+ va_start(ap, fmt);
+ count = vfscanf(stdin, fmt, ap);
+ va_end(ap);
+
+ return count;
+}
diff --git a/libc/stdio/setvbuf.c b/libc/stdio/setvbuf.c
new file mode 100644
index 0000000..9b62dd8
--- /dev/null
+++ b/libc/stdio/setvbuf.c
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+int setvbuf(FILE *stream, char *buf, int mode, size_t size)
+{
+ if (mode != _IONBF && mode != _IOLBF && mode != _IOFBF)
+ return -1;
+ stream->buf = buf;
+ stream->mode = mode;
+ stream->bufsiz = size;
+ return 0;
+}
+
+void setbuf(FILE *stream, char *buf)
+{
+ setvbuf(stream, buf, buf ? _IOFBF : _IONBF, BUFSIZ);
+}
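
A short usage sketch of the two functions above; the stream choices and
the buffer are illustrative:

	static char linebuf[BUFSIZ];

	/* line-buffer stdout through a caller-supplied buffer */
	setvbuf(stdout, linebuf, _IOLBF, sizeof(linebuf));

	/* setbuf() is the shorthand: a NULL buffer means unbuffered */
	setbuf(stderr, NULL);
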
diff --git a/libc/stdio/snprintf.c b/libc/stdio/snprintf.c
new file mode 100644
index 0000000..cc1cc0f
--- /dev/null
+++ b/libc/stdio/snprintf.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+
+int snprintf(char *buff, size_t size, const char *format, ...)
+{
+ va_list ar;
+ int count;
+
+ if ((buff==NULL) || (format==NULL))
+ return(-1);
+
+ va_start(ar, format);
+ count = vsnprintf(buff, size, format, ar);
+ va_end(ar);
+
+ return(count);
+}
+
diff --git a/libc/stdio/sprintf.c b/libc/stdio/sprintf.c
new file mode 100644
index 0000000..9c4540e
--- /dev/null
+++ b/libc/stdio/sprintf.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdio.h>
+
+
+int sprintf(char *buff, const char *format, ...)
+{
+ va_list ar;
+ int count;
+
+ if ((buff==NULL) || (format==NULL))
+ return(-1);
+
+ va_start(ar, format);
+ count = vsprintf(buff, format, ar);
+ va_end(ar);
+
+ return(count);
+}
+
diff --git a/libc/stdio/stdchnls.c b/libc/stdio/stdchnls.c
new file mode 100644
index 0000000..41ed958
--- /dev/null
+++ b/libc/stdio/stdchnls.c
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+
+#include "stdio.h"
+
+static char stdin_buffer[BUFSIZ], stdout_buffer[BUFSIZ];
+
+FILE stdin_data = { .fd = 0, .mode = _IOLBF, .pos = 0,
+ .buf = stdin_buffer, .bufsiz = BUFSIZ };
+FILE stdout_data = { .fd = 1, .mode = _IOLBF, .pos = 0,
+ .buf = stdout_buffer, .bufsiz = BUFSIZ };
+FILE stderr_data = { .fd = 2, .mode = _IONBF, .pos = 0,
+ .buf = NULL, .bufsiz = 0 };
diff --git a/libc/stdio/vfprintf.c b/libc/stdio/vfprintf.c
new file mode 100644
index 0000000..765feea
--- /dev/null
+++ b/libc/stdio/vfprintf.c
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+#include "unistd.h"
+
+
+int vfprintf(FILE *stream, const char *fmt, va_list ap)
+{
+ int count;
+ char buffer[320];
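+	/* note: output longer than this fixed stack buffer is truncated */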
+
+ count = vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ write(stream->fd, buffer, count);
+
+ return count;
+}
+
diff --git a/libc/stdio/vfscanf.c b/libc/stdio/vfscanf.c
new file mode 100644
index 0000000..85ca8be
--- /dev/null
+++ b/libc/stdio/vfscanf.c
@@ -0,0 +1,269 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+#include "ctype.h"
+#include "stdlib.h"
+#include "stdio.h"
+#include "unistd.h"
+
+
+static int
+_getc(FILE * stream)
+{
+ int count;
+ char c;
+
+ if (stream->mode == _IONBF || stream->buf == NULL) {
+ if (read(stream->fd, &c, 1) == 1)
+ return (int) c;
+ else
+ return EOF;
+ }
+
+ if (stream->pos == 0 || stream->pos >= BUFSIZ ||
+ stream->buf[stream->pos] == '\0') {
+ count = read(stream->fd, stream->buf, BUFSIZ);
+ if (count < 0)
+ count = 0;
+ if (count < BUFSIZ)
+ stream->buf[count] = '\0';
+ stream->pos = 0;
+ }
+
+ return stream->buf[stream->pos++];
+}
+
+static void
+_ungetc(int ch, FILE * stream)
+{
+ if (stream->mode != _IONBF && stream->pos > 0) {
+ if (stream->pos < BUFSIZ)
+ stream->buf[stream->pos] = ch;
+ stream->pos--;
+ }
+}
+
+static int
+_is_voidage(int ch)
+{
+ if (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\0')
+ return 1;
+ else
+ return 0;
+}
+
+
+static int
+_scanf(FILE * stream, const char *fmt, va_list * ap)
+{
+ int i = 0;
+ int length = 0;
+
+ fmt++;
+
+ while (*fmt != '\0') {
+
+ char tbuf[256];
+ char ch;
+
+ switch (*fmt) {
+ case 'd':
+ case 'i':
+ ch = _getc(stream);
+ if (length == 0) {
+ while (!_is_voidage(ch) && isdigit(ch)) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ } else {
+ while (!_is_voidage(ch) && i < length
+ && isdigit(ch)) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ }
+			/* The purpose of this call was never clear, and
+			 * enabling it is known to break parsing on SLOF,
+			 * so it stays disabled: */
+ /* _ungetc(ch, stream); */
+ tbuf[i] = '\0';
+
+ /* ch = _getc(stream); */
+ if (!_is_voidage(ch))
+ _ungetc(ch, stream);
+
+ if (strlen(tbuf) == 0)
+ return 0;
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 10);
+ break;
+ case 'X':
+ case 'x':
+ ch = _getc(stream);
+ if (length == 0) {
+ while (!_is_voidage(ch) && isxdigit(ch)) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ } else {
+ while (!_is_voidage(ch) && i < length
+ && isxdigit(ch)) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ }
+ /* _ungetc(ch, stream); */
+ tbuf[i] = '\0';
+
+ /* ch = _getc(stream); */
+ if (!_is_voidage(ch))
+ _ungetc(ch, stream);
+
+ if (strlen(tbuf) == 0)
+ return 0;
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 16);
+ break;
+ case 'O':
+ case 'o':
+ ch = _getc(stream);
+ if (length == 0) {
+ while (!_is_voidage(ch)
+ && !(ch < '0' || ch > '7')) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ } else {
+ while (!_is_voidage(ch) && i < length
+ && !(ch < '0' || ch > '7')) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ }
+ /* _ungetc(ch, stream); */
+ tbuf[i] = '\0';
+
+ /* ch = _getc(stream); */
+ if (!_is_voidage(ch))
+ _ungetc(ch, stream);
+
+ if (strlen(tbuf) == 0)
+ return 0;
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 8);
+ break;
+ case 'c':
+ ch = _getc(stream);
+ while (_is_voidage(ch))
+ ch = _getc(stream);
+
+ *(va_arg(*ap, char *)) = ch;
+
+ ch = _getc(stream);
+ if (!_is_voidage(ch))
+ _ungetc(ch, stream);
+
+ break;
+ case 's':
+ ch = _getc(stream);
+ if (length == 0) {
+ while (!_is_voidage(ch)) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ } else {
+ while (!_is_voidage(ch) && i < length) {
+ tbuf[i] = ch;
+ ch = _getc(stream);
+ i++;
+ }
+ }
+ /* _ungetc(ch, stream); */
+ tbuf[i] = '\0';
+
+ /* ch = _getc(stream); */
+ if (!_is_voidage(ch))
+ _ungetc(ch, stream);
+
+ strcpy(va_arg(*ap, char *), tbuf);
+ break;
+ default:
+ if (*fmt >= '0' && *fmt <= '9')
+ length += *fmt - '0';
+ break;
+ }
+ fmt++;
+ }
+
+ return 1;
+}
+
+
+
+int
+vfscanf(FILE * stream, const char *fmt, va_list ap)
+{
+ int args = 0;
+
+ while (*fmt != '\0') {
+
+ if (*fmt == '%') {
+
+ char formstr[20];
+ int i = 0;
+
+ do {
+ formstr[i] = *fmt;
+ fmt++;
+ i++;
+ } while (!
+ (*fmt == 'd' || *fmt == 'i' || *fmt == 'x'
+ || *fmt == 'X' || *fmt == 'p' || *fmt == 'c'
+ || *fmt == 's' || *fmt == '%' || *fmt == 'O'
+ || *fmt == 'o'));
+ formstr[i++] = *fmt;
+ formstr[i] = '\0';
+ if (*fmt != '%') {
+ if (_scanf(stream, formstr, &ap) <= 0)
+ return args;
+ else
+ args++;
+ }
+
+ }
+
+ fmt++;
+
+ }
+
+ return args;
+}
+
+int
+getc(FILE * stream)
+{
+ return _getc(stream);
+}
+
+int
+getchar(void)
+{
+ return _getc(stdin);
+}
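
Only the conversion characters listed in the while-condition above are
recognised (no %u or %f, and multi-digit field widths are summed rather
than treated positionally), so a minimal sketch of what this parser
supports, with hypothetical input "42 hello" on stdin:

	int n;
	char word[32];

	/* %s stops at whitespace; the internal buffer is 256 bytes, and
	 * the destination must be large enough for one token */
	if (scanf("%d %s", &n, word) == 2)
		printf("n=%d word=%s\n", n, word);
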
diff --git a/libc/stdio/vsnprintf.c b/libc/stdio/vsnprintf.c
new file mode 100644
index 0000000..d1cd4e3
--- /dev/null
+++ b/libc/stdio/vsnprintf.c
@@ -0,0 +1,246 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdbool.h>
+#include <compiler.h>
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+#include "ctype.h"
+
+static const unsigned long long convert[] = {
+ 0x0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFFFFULL, 0xFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL
+};
+
+
+
+static int
+print_itoa(char **buffer, unsigned long value, unsigned short base, bool upper)
+{
+ const char zeichen[] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
+ char c;
+
+ if(base <= 2 || base > 16)
+ return 0;
+
+ if (value < base) {
+ c = zeichen[value];
+ if (upper)
+ c = toupper(c);
+ **buffer = c;
+ *buffer += 1;
+ } else {
+ print_itoa(buffer, value / base, base, upper);
+ c = zeichen[value % base];
+ if (upper)
+ c = toupper(c);
+ **buffer = c;
+ *buffer += 1;
+ }
+
+ return 1;
+}
+
+
+static unsigned int __attrconst
+print_intlen(unsigned long value, unsigned short int base)
+{
+ int i = 0;
+
+ while(value > 0) {
+ value /= base;
+ i++;
+ }
+ if(i == 0) i = 1;
+ return i;
+}
+
+
+static int
+print_fill(char **buffer, char *sizec, unsigned long size, unsigned short int base, char c, int optlen)
+{
+ int i, sizei, len;
+
+ sizei = strtoul(sizec, NULL, 10);
+ len = print_intlen(size, base) + optlen;
+ if(sizei > len) {
+ for(i = 0; i < (sizei - len); i++) {
+ **buffer = c;
+ *buffer += 1;
+ }
+ }
+
+ return 0;
+}
+
+
+static int
+print_format(char **buffer, const char *format, void *var)
+{
+ unsigned long start;
+ unsigned int i = 0, sizei = 0, len = 0, length_mod = sizeof(int);
+ unsigned long value = 0;
+ unsigned long signBit;
+ char *form, sizec[32];
+ char sign = ' ';
+ bool upper = false;
+
+ form = (char *) format;
+ start = (unsigned long) *buffer;
+
+ form++;
+ if(*form == '0' || *form == '.') {
+ sign = '0';
+ form++;
+ }
+
+ while(*form != '\0') {
+ switch(*form) {
+ case 'u':
+ case 'd':
+ case 'i':
+ sizec[i] = '\0';
+ value = (unsigned long) var;
+ signBit = 0x1ULL << (length_mod * 8 - 1);
+ if ((*form != 'u') && (signBit & value)) {
+ **buffer = '-';
+ *buffer += 1;
+ value = (-(unsigned long)value) & convert[length_mod];
+ }
+ print_fill(buffer, sizec, value, 10, sign, 0);
+ print_itoa(buffer, value, 10, upper);
+ break;
+ case 'X':
+ upper = true;
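+			/* fall through */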
+ case 'x':
+ sizec[i] = '\0';
+ value = (unsigned long) var & convert[length_mod];
+ print_fill(buffer, sizec, value, 16, sign, 0);
+ print_itoa(buffer, value, 16, upper);
+ break;
+ case 'O':
+ case 'o':
+ sizec[i] = '\0';
+ value = (long int) var & convert[length_mod];
+ print_fill(buffer, sizec, value, 8, sign, 0);
+ print_itoa(buffer, value, 8, upper);
+ break;
+ case 'p':
+ sizec[i] = '\0';
+ print_fill(buffer, sizec, (unsigned long) var, 16, ' ', 2);
+ **buffer = '0';
+ *buffer += 1;
+ **buffer = 'x';
+ *buffer += 1;
+ print_itoa(buffer,(unsigned long) var, 16, upper);
+ break;
+ case 'c':
+ sizec[i] = '\0';
+ print_fill(buffer, sizec, 1, 10, ' ', 0);
+ **buffer = (unsigned long) var;
+ *buffer += 1;
+ break;
+ case 's':
+ sizec[i] = '\0';
+ sizei = strtoul(sizec, NULL, 10);
+ len = strlen((char *) var);
+ if(sizei > len) {
+ for(i = 0; i < (sizei - len); i++) {
+ **buffer = ' ';
+ *buffer += 1;
+ }
+ }
+ for(i = 0; i < strlen((char *) var); i++) {
+ **buffer = ((char *) var)[i];
+ *buffer += 1;
+ }
+ break;
+ case 'l':
+ form++;
+ if(*form == 'l') {
+ length_mod = sizeof(long long int);
+ } else {
+ form--;
+ length_mod = sizeof(long int);
+ }
+ break;
+ case 'h':
+ form++;
+ if(*form == 'h') {
+ length_mod = sizeof(signed char);
+ } else {
+ form--;
+ length_mod = sizeof(short int);
+ }
+ break;
+ case 'z':
+ length_mod = sizeof(size_t);
+ break;
+ default:
+ if(*form >= '0' && *form <= '9')
+ sizec[i++] = *form;
+ }
+ form++;
+ }
+
+
+ return (long int) (*buffer - start);
+}
+
+
+/*
+ * The vsnprintf function formats a string into a buffer.
+ * BUG: buffer size checking does not fully work yet.
+ */
+int
+vsnprintf(char *buffer, size_t bufsize, const char *format, va_list arg)
+{
+ char *ptr, *bstart;
+
+ bstart = buffer;
+ ptr = (char *) format;
+
+ while(*ptr != '\0' && (buffer - bstart) < bufsize)
+ {
+ if(*ptr == '%') {
+ char formstr[20];
+ int i=0;
+
+ do {
+ formstr[i] = *ptr;
+ ptr++;
+ i++;
+ } while(!(*ptr == 'd' || *ptr == 'i' || *ptr == 'u' || *ptr == 'x' || *ptr == 'X'
+ || *ptr == 'p' || *ptr == 'c' || *ptr == 's' || *ptr == '%'
+ || *ptr == 'O' || *ptr == 'o' ));
+ formstr[i++] = *ptr;
+ formstr[i] = '\0';
+ if(*ptr == '%') {
+ *buffer++ = '%';
+ } else {
+ print_format(&buffer, formstr, va_arg(arg, void *));
+ }
+ ptr++;
+ } else {
+
+ *buffer = *ptr;
+
+ buffer++;
+ ptr++;
+ }
+ }
+
+ *buffer = '\0';
+
+ return (buffer - bstart);
+}
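
A few calls the implementation above handles, matching the cases in
print_format(); the expected results are shown as comments. Given the
BUG note about size checking, it is prudent to keep the buffer well
above the worst-case output length:

	char buf[64];

	snprintf(buf, sizeof(buf), "%d", -42);         /* "-42" */
	snprintf(buf, sizeof(buf), "%08x", 0x1234);    /* "00001234" */
	snprintf(buf, sizeof(buf), "%p", (void *)buf); /* "0x..." */
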
diff --git a/libc/stdio/vsprintf.c b/libc/stdio/vsprintf.c
new file mode 100644
index 0000000..0dfd737
--- /dev/null
+++ b/libc/stdio/vsprintf.c
@@ -0,0 +1,19 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+
+int
+vsprintf(char *buffer, const char *format, va_list arg)
+{
+ return vsnprintf(buffer, 0x7fffffff, format, arg);
+}
diff --git a/libc/stdio/vsscanf.c b/libc/stdio/vsscanf.c
new file mode 100644
index 0000000..b9603e9
--- /dev/null
+++ b/libc/stdio/vsscanf.c
@@ -0,0 +1,131 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+
+
+static void
+_scanf(const char **buffer, const char *fmt, va_list *ap)
+{
+ int i;
+ int length = 0;
+
+ fmt++;
+
+ while(*fmt != '\0') {
+
+ char tbuf[256];
+
+ switch(*fmt) {
+ case 'd':
+ case 'i':
+ if(length == 0) length = 256;
+
+ for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
+ tbuf[i] = **buffer;
+ *buffer += 1;
+ }
+ tbuf[i] = '\0';
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 10);
+ break;
+ case 'X':
+ case 'x':
+ if(length == 0) length = 256;
+
+ for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
+ tbuf[i] = **buffer;
+ *buffer += 1;
+ }
+ tbuf[i] = '\0';
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 16);
+ break;
+ case 'O':
+ case 'o':
+ if(length == 0) length = 256;
+
+ for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
+ tbuf[i] = **buffer;
+ *buffer += 1;
+ }
+ tbuf[i] = '\0';
+
+ *(va_arg(*ap, int *)) = strtol(tbuf, NULL, 8);
+ break;
+ case 'c':
+ *(va_arg(*ap, char *)) = **buffer;
+ *buffer += 1;
+ if(length > 1)
+ for(i = 1; i < length; i++)
+ *buffer += 1;
+ break;
+ case 's':
+ if(length == 0) length = 256;
+
+ for(i = 0; **buffer != ' ' && **buffer != '\t' && **buffer != '\n' && i < length; i++) {
+ tbuf[i] = **buffer;
+ *buffer += 1;
+ }
+
+ tbuf[i] = '\0';
+
+ strcpy(va_arg(*ap, char *), tbuf);
+ break;
+ default:
+ if(*fmt >= '0' && *fmt <= '9')
+ length += *fmt - '0';
+ break;
+ }
+ fmt++;
+ }
+
+}
+
+
+int
+vsscanf(const char *buffer, const char *fmt, va_list ap)
+{
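+	/* note: unlike vfscanf(), this variant does not count the
+	 * conversions it performs and always returns 0 */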
+
+ while(*fmt != '\0') {
+
+ if(*fmt == '%') {
+
+ char formstr[20];
+ int i=0;
+
+ do {
+ formstr[i] = *fmt;
+ fmt++;
+ i++;
+ } while(!(*fmt == 'd' || *fmt == 'i' || *fmt == 'x' || *fmt == 'X'
+ || *fmt == 'p' || *fmt == 'c' || *fmt == 's' || *fmt == '%'
+ || *fmt == 'O' || *fmt == 'o' ));
+ formstr[i++] = *fmt;
+ formstr[i] = '\0';
+ if(*fmt != '%') {
+ while(*buffer == ' ' || *buffer == '\t' || *buffer == '\n')
+ buffer++;
+ _scanf(&buffer, formstr, &ap);
+ }
+
+ }
+
+ fmt++;
+
+ }
+
+ return 0;
+}
+
diff --git a/libc/stdlib/Makefile.inc b/libc/stdlib/Makefile.inc
new file mode 100644
index 0000000..473cbfc
--- /dev/null
+++ b/libc/stdlib/Makefile.inc
@@ -0,0 +1,20 @@
+# *****************************************************************************
+# * Copyright (c) 2004, 2008 IBM Corporation
+# * All rights reserved.
+# * This program and the accompanying materials
+# * are made available under the terms of the BSD License
+# * which accompanies this distribution, and is available at
+# * http://www.opensource.org/licenses/bsd-license.php
+# *
+# * Contributors:
+# * IBM Corporation - initial implementation
+# ****************************************************************************/
+
+SUBDIRS += $(LIBCDIR)/stdlib
+
+STDLIB_OBJS = error.o atoi.o atol.o strtol.o strtoul.o \
+ rand.o abort.o
+
+STDLIB = $(LIBCDIR)/stdlib/built-in.o
+$(STDLIB): $(STDLIB_OBJS:%=$(LIBCDIR)/stdlib/%)
+
diff --git a/libc/stdlib/abort.c b/libc/stdlib/abort.c
new file mode 100644
index 0000000..9c5ea47
--- /dev/null
+++ b/libc/stdlib/abort.c
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008, 2012 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+void assert_fail(const char *msg)
+{
+	fputs("Assert fail: ", stderr);
+ fputs(msg, stderr);
+ fputs("\n", stderr);
+ abort();
+}
diff --git a/libc/stdlib/atoi.c b/libc/stdlib/atoi.c
new file mode 100644
index 0000000..d2fb33b
--- /dev/null
+++ b/libc/stdlib/atoi.c
@@ -0,0 +1,18 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+
+int atoi(const char *str)
+{
+ return strtol(str, NULL, 0);
+}
diff --git a/libc/stdlib/atol.c b/libc/stdlib/atol.c
new file mode 100644
index 0000000..a6aa47b
--- /dev/null
+++ b/libc/stdlib/atol.c
@@ -0,0 +1,18 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+
+long atol(const char *str)
+{
+ return strtol(str, NULL, 0);
+}
diff --git a/libc/stdlib/error.c b/libc/stdlib/error.c
new file mode 100644
index 0000000..81020ca
--- /dev/null
+++ b/libc/stdlib/error.c
@@ -0,0 +1,15 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+
+int errno;
+
diff --git a/libc/stdlib/rand.c b/libc/stdlib/rand.c
new file mode 100644
index 0000000..87e3efd
--- /dev/null
+++ b/libc/stdlib/rand.c
@@ -0,0 +1,24 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+
+
+static unsigned long _rand = 1;
+
+int
+rand(void)
+{
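+	/* linear congruential generator: advance the state, then expose
+	 * bits 16..30 of it, giving a value in [0, RAND_MAX] */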
+ _rand = _rand * 25364735 + 34563;
+
+	return ((unsigned int) (_rand >> 16) & RAND_MAX);
+}
diff --git a/libc/stdlib/strtol.c b/libc/stdlib/strtol.c
new file mode 100644
index 0000000..474597a
--- /dev/null
+++ b/libc/stdlib/strtol.c
@@ -0,0 +1,115 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+
+long int strtol(const char *S, char **PTR, int BASE)
+{
+ long rval = 0;
+ short int negative = 0;
+ short int digit;
+	// *PTR is set to S, unless PTR is NULL, in which case a local pointer is used
+ char* ptr;
+ if (PTR == 0)
+ {
+ //override
+ PTR = &ptr;
+ }
+	// PTR is used to advance through the string
+ *PTR = (char *) S;
+ //check if BASE is ok
+ if ((BASE < 0) || BASE > 36)
+ {
+ return 0;
+ }
+ // ignore white space at beginning of S
+ while ((**PTR == ' ')
+ || (**PTR == '\t')
+ || (**PTR == '\n')
+ || (**PTR == '\r')
+ )
+ {
+ (*PTR)++;
+ }
+ // check if S starts with "-" in which case the return value is negative
+ if (**PTR == '-')
+ {
+ negative = 1;
+ (*PTR)++;
+ }
+ // if BASE is 0... determine the base from the first chars...
+ if (BASE == 0)
+ {
+ // if S starts with "0x", BASE = 16, else 10
+ if ((**PTR == '0') && (*((*PTR)+1) == 'x'))
+ {
+ BASE = 16;
+ (*PTR)++;
+ (*PTR)++;
+ }
+ else
+ {
+ BASE = 10;
+ }
+ }
+ if (BASE == 16)
+ {
+ // S may start with "0x"
+ if ((**PTR == '0') && (*((*PTR)+1) == 'x'))
+ {
+ (*PTR)++;
+ (*PTR)++;
+ }
+ }
+ //until end of string
+ while (**PTR)
+ {
+ if (((**PTR) >= '0') && ((**PTR) <= '9'))
+ {
+ //digit (0..9)
+ digit = **PTR - '0';
+ }
+ else if (((**PTR) >= 'a') && ((**PTR) <='z'))
+ {
+ //alphanumeric digit lowercase(a (10) .. z (35) )
+ digit = (**PTR - 'a') + 10;
+ }
+ else if (((**PTR) >= 'A') && ((**PTR) <='Z'))
+ {
+ //alphanumeric digit uppercase(a (10) .. z (35) )
+ digit = (**PTR - 'A') + 10;
+ }
+ else
+ {
+ //end of parseable number reached...
+ break;
+ }
+ if (digit < BASE)
+ {
+ rval = (rval * BASE) + digit;
+ }
+ else
+ {
+			//digit found, but it's too big for the current base
+ //end of parseable number reached...
+ break;
+ }
+ //next...
+ (*PTR)++;
+ }
+ if (negative)
+ {
+ return rval * -1;
+ }
+ //else
+ return rval;
+}
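
The behaviour of the routine above in a nutshell: there is no
errno/ERANGE reporting, and base 0 auto-detects only the "0x" prefix,
falling back to decimal otherwise. A short sketch:

	char *end;

	long a = strtol("  -42", &end, 0);  /* -42: base 0 falls back to 10 */
	long b = strtol("0x1f", &end, 0);   /* 31: "0x" selects base 16 */
	long c = strtol("ff zz", &end, 16); /* 255: end points at " zz" */
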
diff --git a/libc/stdlib/strtoul.c b/libc/stdlib/strtoul.c
new file mode 100644
index 0000000..754e7db
--- /dev/null
+++ b/libc/stdlib/strtoul.c
@@ -0,0 +1,105 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <stdlib.h>
+
+unsigned long int strtoul(const char *S, char **PTR, int BASE)
+{
+ unsigned long rval = 0;
+ short int digit;
+	// *PTR is set to S, unless PTR is NULL, in which case a local pointer is used
+ char* ptr;
+ if (PTR == 0)
+ {
+ //override
+ PTR = &ptr;
+ }
+	// PTR is used to advance through the string
+ *PTR = (char *) S;
+ //check if BASE is ok
+ if ((BASE < 0) || BASE > 36)
+ {
+ return 0;
+ }
+ // ignore white space at beginning of S
+ while ((**PTR == ' ')
+ || (**PTR == '\t')
+ || (**PTR == '\n')
+ || (**PTR == '\r')
+ )
+ {
+ (*PTR)++;
+ }
+ // if BASE is 0... determine the base from the first chars...
+ if (BASE == 0)
+ {
+ // if S starts with "0x", BASE = 16, else 10
+ if ((**PTR == '0') && (*((*PTR)+1) == 'x'))
+ {
+ BASE = 16;
+ (*PTR)++;
+ (*PTR)++;
+ }
+ else
+ {
+ BASE = 10;
+ }
+ }
+ if (BASE == 16)
+ {
+ // S may start with "0x"
+ if ((**PTR == '0') && (*((*PTR)+1) == 'x'))
+ {
+ (*PTR)++;
+ (*PTR)++;
+ }
+ }
+ //until end of string
+ while (**PTR)
+ {
+ if (((**PTR) >= '0') && ((**PTR) <='9'))
+ {
+ //digit (0..9)
+ digit = **PTR - '0';
+ }
+ else if (((**PTR) >= 'a') && ((**PTR) <='z'))
+ {
+ //alphanumeric digit lowercase(a (10) .. z (35) )
+ digit = (**PTR - 'a') + 10;
+ }
+ else if (((**PTR) >= 'A') && ((**PTR) <='Z'))
+ {
+ //alphanumeric digit uppercase(a (10) .. z (35) )
+ digit = (**PTR - 'A') + 10;
+ }
+ else
+ {
+ //end of parseable number reached...
+ break;
+ }
+ if (digit < BASE)
+ {
+ rval = (rval * BASE) + digit;
+ }
+ else
+ {
+			//digit found, but it's too big for the current base
+ //end of parseable number reached...
+ break;
+ }
+ //next...
+ (*PTR)++;
+ }
+ //done
+ return rval;
+}
+
diff --git a/libc/string/Makefile.inc b/libc/string/Makefile.inc
new file mode 100644
index 0000000..3b7c8ce
--- /dev/null
+++ b/libc/string/Makefile.inc
@@ -0,0 +1,20 @@
+# *****************************************************************************
+# * Copyright (c) 2004, 2008 IBM Corporation
+# * All rights reserved.
+# * This program and the accompanying materials
+# * are made available under the terms of the BSD License
+# * which accompanies this distribution, and is available at
+# * http://www.opensource.org/licenses/bsd-license.php
+# *
+# * Contributors:
+# * IBM Corporation - initial implementation
+# ****************************************************************************/
+
+SUBDIRS += $(LIBCDIR)/string
+
+STRING_OBJS = strcat.o strchr.o strcmp.o strcpy.o strlen.o strncmp.o \
+ strncpy.o strstr.o memset.o memcpy.o memmove.o memchr.o \
+ memcmp.o strcasecmp.o strncasecmp.o strtok.o strdup.o
+STRING = $(LIBCDIR)/string/built-in.o
+$(STRING): $(STRING_OBJS:%=$(LIBCDIR)/string/%)
+
diff --git a/libc/string/memchr.c b/libc/string/memchr.c
new file mode 100644
index 0000000..c3fe751
--- /dev/null
+++ b/libc/string/memchr.c
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+
+
+void *
+memchr(const void *ptr, int c, size_t n)
+{
+ unsigned char ch = (unsigned char)c;
+ const unsigned char *p = ptr;
+
+ while (n-- > 0) {
+ if (*p == ch)
+ return (void *)p;
+ p += 1;
+ }
+
+ return NULL;
+}
diff --git a/libc/string/memcmp.c b/libc/string/memcmp.c
new file mode 100644
index 0000000..3b69cef
--- /dev/null
+++ b/libc/string/memcmp.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+
+
+int
+memcmp(const void *ptr1, const void *ptr2, size_t n)
+{
+ const unsigned char *p1 = ptr1;
+ const unsigned char *p2 = ptr2;
+
+ while (n-- > 0) {
+ if (*p1 != *p2)
+ return (*p1 - *p2);
+ p1 += 1;
+ p2 += 1;
+ }
+
+ return 0;
+}
diff --git a/libc/string/memcpy.c b/libc/string/memcpy.c
new file mode 100644
index 0000000..00f419b
--- /dev/null
+++ b/libc/string/memcpy.c
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+
+void *
+memcpy(void *dest, const void *src, size_t n)
+{
+ char *cdest;
+ const char *csrc = src;
+
+ cdest = dest;
+ while (n-- > 0) {
+ *cdest++ = *csrc++;
+ }
+
+ return dest;
+}
diff --git a/libc/string/memmove.c b/libc/string/memmove.c
new file mode 100644
index 0000000..3acf1a9
--- /dev/null
+++ b/libc/string/memmove.c
@@ -0,0 +1,42 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+
+
+void *
+memmove(void *dest, const void *src, size_t n)
+{
+ char *cdest;
+ const char *csrc;
+	size_t i;
+
+ /* Do the buffers overlap in a bad way? */
+ if (src < dest && src + n >= dest) {
+ /* Copy from end to start */
+ cdest = dest + n - 1;
+ csrc = src + n - 1;
+ for (i = 0; i < n; i++) {
+ *cdest-- = *csrc--;
+ }
+ }
+ else {
+ /* Normal copy is possible */
+ cdest = dest;
+ csrc = src;
+ for (i = 0; i < n; i++) {
+ *cdest++ = *csrc++;
+ }
+ }
+
+ return dest;
+}
diff --git a/libc/string/memset.c b/libc/string/memset.c
new file mode 100644
index 0000000..f8dfbf5
--- /dev/null
+++ b/libc/string/memset.c
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include "string.h"
+
+void *
+memset(void *dest, int c, size_t size)
+{
+ unsigned char *d = (unsigned char *)dest;
+
+ while (size-- > 0) {
+ *d++ = (unsigned char)c;
+ }
+
+ return dest;
+}
diff --git a/libc/string/strcasecmp.c b/libc/string/strcasecmp.c
new file mode 100644
index 0000000..f75294f
--- /dev/null
+++ b/libc/string/strcasecmp.c
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+#include <ctype.h>
+
+int
+strcasecmp(const char *s1, const char *s2)
+{
+ while (*s1 != 0 && *s2 != 0) {
+ if (toupper(*s1) != toupper(*s2))
+ break;
+ ++s1;
+ ++s2;
+ }
+
+	return toupper(*s1) - toupper(*s2);
+}
+
diff --git a/libc/string/strcat.c b/libc/string/strcat.c
new file mode 100644
index 0000000..eb597a0
--- /dev/null
+++ b/libc/string/strcat.c
@@ -0,0 +1,24 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strcat(char *dst, const char *src)
+{
+	size_t p;
+
+ p = strlen(dst);
+ strcpy(&dst[p], src);
+
+ return dst;
+}
diff --git a/libc/string/strchr.c b/libc/string/strchr.c
new file mode 100644
index 0000000..528a319
--- /dev/null
+++ b/libc/string/strchr.c
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strchr(const char *s, int c)
+{
+ char cb = c;
+
+ while (*s != 0) {
+ if (*s == cb) {
+ return (char *)s;
+ }
+ s += 1;
+ }
+
+	/* The terminating NUL is considered part of the string */
+	return cb == 0 ? (char *)s : NULL;
+}
diff --git a/libc/string/strcmp.c b/libc/string/strcmp.c
new file mode 100644
index 0000000..48eaed2
--- /dev/null
+++ b/libc/string/strcmp.c
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+
+int
+strcmp(const char *s1, const char *s2)
+{
+ while (*s1 != 0 && *s2 != 0) {
+ if (*s1 != *s2)
+ break;
+ s1 += 1;
+ s2 += 1;
+ }
+
+ return *s1 - *s2;
+}
+
diff --git a/libc/string/strcpy.c b/libc/string/strcpy.c
new file mode 100644
index 0000000..48eb62c
--- /dev/null
+++ b/libc/string/strcpy.c
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strcpy(char *dst, const char *src)
+{
+ char *ptr = dst;
+
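+	/* Copy every byte, including the terminating NUL */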
+ do {
+ *ptr++ = *src;
+ } while (*src++ != 0);
+
+ return dst;
+}
diff --git a/libc/string/strdup.c b/libc/string/strdup.c
new file mode 100644
index 0000000..be91e23
--- /dev/null
+++ b/libc/string/strdup.c
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * Copyright (c) 2012 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+#include <stdlib.h>
+
+char *strdup(const char *src)
+{
+ size_t len = strlen(src) + 1;
+ char *ret;
+
+ ret = malloc(len);
+ if (ret)
+ memcpy(ret, src, len);
+ return ret;
+}
diff --git a/libc/string/strlen.c b/libc/string/strlen.c
new file mode 100644
index 0000000..37a1b78
--- /dev/null
+++ b/libc/string/strlen.c
@@ -0,0 +1,27 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+size_t
+strlen(const char *s)
+{
+	size_t len = 0;
+
+ while (*s != 0) {
+ len += 1;
+ s += 1;
+ }
+
+ return len;
+}
+
diff --git a/libc/string/strncasecmp.c b/libc/string/strncasecmp.c
new file mode 100644
index 0000000..4140931
--- /dev/null
+++ b/libc/string/strncasecmp.c
@@ -0,0 +1,32 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+#include <ctype.h>
+
+
+int
+strncasecmp(const char *s1, const char *s2, size_t n)
+{
+ if (n < 1)
+ return 0;
+
+ while (*s1 != 0 && *s2 != 0 && --n > 0) {
+ if (toupper(*s1) != toupper(*s2))
+ break;
+ ++s1;
+ ++s2;
+ }
+
+ return toupper(*s1) - toupper(*s2);
+}
+
diff --git a/libc/string/strncmp.c b/libc/string/strncmp.c
new file mode 100644
index 0000000..a886736
--- /dev/null
+++ b/libc/string/strncmp.c
@@ -0,0 +1,31 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+
+int
+strncmp(const char *s1, const char *s2, size_t n)
+{
+ if (n < 1)
+ return 0;
+
+ while (*s1 != 0 && *s2 != 0 && --n > 0) {
+ if (*s1 != *s2)
+ break;
+ s1 += 1;
+ s2 += 1;
+ }
+
+ return *s1 - *s2;
+}
+
diff --git a/libc/string/strncpy.c b/libc/string/strncpy.c
new file mode 100644
index 0000000..0f41f93
--- /dev/null
+++ b/libc/string/strncpy.c
@@ -0,0 +1,33 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strncpy(char *dst, const char *src, size_t n)
+{
+ char *ret = dst;
+
+ /* Copy string */
+ while (*src != 0 && n > 0) {
+ *dst++ = *src++;
+ n -= 1;
+ }
+
+	/* strncpy always clears the rest of the destination string... */
+ while (n > 0) {
+ *dst++ = 0;
+ n -= 1;
+ }
+
+ return ret;
+}
diff --git a/libc/string/strstr.c b/libc/string/strstr.c
new file mode 100644
index 0000000..3e090d2
--- /dev/null
+++ b/libc/string/strstr.c
@@ -0,0 +1,37 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strstr(const char *hay, const char *needle)
+{
+ char *pos;
+ int hlen, nlen;
+
+ if (hay == NULL || needle == NULL)
+ return NULL;
+
+ hlen = strlen(hay);
+ nlen = strlen(needle);
+ if (nlen < 1)
+ return (char *)hay;
+
+ for (pos = (char *)hay; pos < hay + hlen; pos++) {
+ if (strncmp(pos, needle, nlen) == 0) {
+ return pos;
+ }
+ }
+
+ return NULL;
+}
+
diff --git a/libc/string/strtok.c b/libc/string/strtok.c
new file mode 100644
index 0000000..665c08d
--- /dev/null
+++ b/libc/string/strtok.c
@@ -0,0 +1,45 @@
+/******************************************************************************
+ * Copyright (c) 2004, 2008 IBM Corporation
+ * All rights reserved.
+ * This program and the accompanying materials
+ * are made available under the terms of the BSD License
+ * which accompanies this distribution, and is available at
+ * http://www.opensource.org/licenses/bsd-license.php
+ *
+ * Contributors:
+ * IBM Corporation - initial implementation
+ *****************************************************************************/
+
+#include <string.h>
+
+char *
+strtok(char *src, const char *pattern)
+{
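+	/* Scan position saved between calls; a NULL src resumes there */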
+ static char *nxtTok;
+ char *retVal = NULL;
+
+	if (!src) {
+		src = nxtTok;
+		if (!src)
+			return NULL;
+	}
+
+ while (*src) {
+ const char *pp = pattern;
+ while (*pp) {
+ if (*pp == *src) {
+ break;
+ }
+ pp++;
+ }
+ if (!*pp) {
+ if (!retVal)
+ retVal = src;
+ else if (!src[-1])
+ break;
+ } else
+ *src = '\0';
+ src++;
+ }
+
+ nxtTok = src;
+
+ return retVal;
+}
diff --git a/libfdt/Makefile.inc b/libfdt/Makefile.inc
new file mode 100644
index 0000000..e477d7c
--- /dev/null
+++ b/libfdt/Makefile.inc
@@ -0,0 +1,18 @@
+# Makefile.inc
+#
+# This is not a complete Makefile in itself. Instead, it is designed to
+# be easily embedded into other systems of Makefiles.
+#
+
+LIBFDT_INCLUDES = fdt.h libfdt.h
+LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
+LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
+
+# The stack-usage warning in fdt_sw.c can't really be fixed, so compile
+# that file without it
+CFLAGS_SKIP_libfdt/fdt_sw.o = -Wstack-usage=4096
+
+SUBDIRS += libfdt
+LIBFDT = libfdt/built-in.o
+
+$(LIBFDT): $(LIBFDT_OBJS:%=libfdt/%)
+
diff --git a/libfdt/Makefile.libfdt b/libfdt/Makefile.libfdt
new file mode 100644
index 0000000..341c803
--- /dev/null
+++ b/libfdt/Makefile.libfdt
@@ -0,0 +1,9 @@
+# Makefile.libfdt
+#
+# This is not a complete Makefile in itself. Instead, it is designed to
+# be easily embedded into other systems of Makefiles.
+#
+LIBFDT_INCLUDES = fdt.h libfdt.h
+LIBFDT_VERSION = version.lds
+LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
+LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)
diff --git a/libfdt/TODO b/libfdt/TODO
new file mode 100644
index 0000000..288437e
--- /dev/null
+++ b/libfdt/TODO
@@ -0,0 +1,3 @@
+- Tree traversal functions
+- Graft function
+- Complete libfdt.h documenting comments
diff --git a/libfdt/fdt.c b/libfdt/fdt.c
new file mode 100644
index 0000000..b1130c2
--- /dev/null
+++ b/libfdt/fdt.c
@@ -0,0 +1,213 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_check_header(const void *fdt)
+{
+ if (fdt_magic(fdt) == FDT_MAGIC) {
+ /* Complete tree */
+ if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
+ return -FDT_ERR_BADVERSION;
+ } else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
+ /* Unfinished sequential-write blob */
+ if (fdt_size_dt_struct(fdt) == 0)
+ return -FDT_ERR_BADSTATE;
+ } else {
+ return -FDT_ERR_BADMAGIC;
+ }
+
+ return 0;
+}
+
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
+{
+ const char *p;
+
+ if (fdt_version(fdt) >= 0x11)
+ if (((offset + len) < offset)
+ || ((offset + len) > fdt_size_dt_struct(fdt)))
+ return NULL;
+
+ p = _fdt_offset_ptr(fdt, offset);
+
+ if (p + len < p)
+ return NULL;
+ return p;
+}
+
+uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
+{
+ const uint32_t *tagp, *lenp;
+ uint32_t tag;
+ int offset = startoffset;
+ const char *p;
+
+ *nextoffset = -FDT_ERR_TRUNCATED;
+ tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
+ if (!tagp)
+ return FDT_END; /* premature end */
+ tag = fdt32_to_cpu(*tagp);
+ offset += FDT_TAGSIZE;
+
+ *nextoffset = -FDT_ERR_BADSTRUCTURE;
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ /* skip name */
+ do {
+ p = fdt_offset_ptr(fdt, offset++, 1);
+ } while (p && (*p != '\0'));
+ if (!p)
+ return FDT_END; /* premature end */
+ break;
+
+ case FDT_PROP:
+ lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
+ if (!lenp)
+ return FDT_END; /* premature end */
+ /* skip-name offset, length and value */
+ offset += sizeof(struct fdt_property) - FDT_TAGSIZE
+ + fdt32_to_cpu(*lenp);
+ break;
+
+ case FDT_END:
+ case FDT_END_NODE:
+ case FDT_NOP:
+ break;
+
+ default:
+ return FDT_END;
+ }
+
+ if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset))
+ return FDT_END; /* premature end */
+
+ *nextoffset = FDT_TAGALIGN(offset);
+ return tag;
+}
+
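+/*
+ * Check that offset is aligned and points at an FDT_BEGIN_NODE tag;
+ * on success, return the offset of the tag that follows it.
+ */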
+int _fdt_check_node_offset(const void *fdt, int offset)
+{
+ if ((offset < 0) || (offset % FDT_TAGSIZE)
+ || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE))
+ return -FDT_ERR_BADOFFSET;
+
+ return offset;
+}
+
+int fdt_next_node(const void *fdt, int offset, int *depth)
+{
+ int nextoffset = 0;
+ uint32_t tag;
+
+ if (offset >= 0)
+ if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0)
+ return nextoffset;
+
+ do {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+ switch (tag) {
+ case FDT_PROP:
+ case FDT_NOP:
+ break;
+
+ case FDT_BEGIN_NODE:
+ if (depth)
+ (*depth)++;
+ break;
+
+ case FDT_END_NODE:
+ if (depth && ((--(*depth)) < 0))
+ return nextoffset;
+ break;
+
+ case FDT_END:
+ if ((nextoffset >= 0)
+ || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth))
+ return -FDT_ERR_NOTFOUND;
+ else
+ return nextoffset;
+ }
+ } while (tag != FDT_BEGIN_NODE);
+
+ return offset;
+}
+
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
+{
+ int len = strlen(s) + 1;
+ const char *last = strtab + tabsize - len;
+ const char *p;
+
+ for (p = strtab; p <= last; p++)
+ if (memcmp(p, s, len) == 0)
+ return p;
+ return NULL;
+}
+
+int fdt_move(const void *fdt, void *buf, int bufsize)
+{
+ FDT_CHECK_HEADER(fdt);
+
+ if (fdt_totalsize(fdt) > bufsize)
+ return -FDT_ERR_NOSPACE;
+
+ memmove(buf, fdt, fdt_totalsize(fdt));
+ return 0;
+}
diff --git a/libfdt/fdt.h b/libfdt/fdt.h
new file mode 100644
index 0000000..48ccfd9
--- /dev/null
+++ b/libfdt/fdt.h
@@ -0,0 +1,60 @@
+#ifndef _FDT_H
+#define _FDT_H
+
+#ifndef __ASSEMBLY__
+
+struct fdt_header {
+ uint32_t magic; /* magic word FDT_MAGIC */
+ uint32_t totalsize; /* total size of DT block */
+ uint32_t off_dt_struct; /* offset to structure */
+ uint32_t off_dt_strings; /* offset to strings */
+ uint32_t off_mem_rsvmap; /* offset to memory reserve map */
+ uint32_t version; /* format version */
+ uint32_t last_comp_version; /* last compatible version */
+
+ /* version 2 fields below */
+ uint32_t boot_cpuid_phys; /* Which physical CPU id we're
+ booting on */
+ /* version 3 fields below */
+ uint32_t size_dt_strings; /* size of the strings block */
+
+ /* version 17 fields below */
+ uint32_t size_dt_struct; /* size of the structure block */
+};
+
+struct fdt_reserve_entry {
+ uint64_t address;
+ uint64_t size;
+};
+
+struct fdt_node_header {
+ uint32_t tag;
+ char name[0];
+};
+
+struct fdt_property {
+ uint32_t tag;
+ uint32_t len;
+ uint32_t nameoff;
+ char data[0];
+};
+
+#endif /* !__ASSEMBLY */
+
+#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */
+#define FDT_TAGSIZE sizeof(uint32_t)
+
+#define FDT_BEGIN_NODE 0x1 /* Start node: full name */
+#define FDT_END_NODE 0x2 /* End node */
+#define FDT_PROP 0x3 /* Property: name off,
+ size, content */
+#define FDT_NOP 0x4 /* nop */
+#define FDT_END 0x9
+
+#define FDT_V1_SIZE (7*sizeof(uint32_t))
+#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(uint32_t))
+#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(uint32_t))
+#define FDT_V16_SIZE FDT_V3_SIZE
+#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(uint32_t))
+
+#endif /* _FDT_H */
diff --git a/libfdt/fdt_ro.c b/libfdt/fdt_ro.c
new file mode 100644
index 0000000..3874195
--- /dev/null
+++ b/libfdt/fdt_ro.c
@@ -0,0 +1,528 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
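+/*
+ * Compare a node's name against s (namelen bytes). A name with a unit
+ * address ("name@addr") also matches when s itself contains no '@'.
+ */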
+static int _fdt_nodename_eq(const void *fdt, int offset,
+ const char *s, int len)
+{
+ const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
+
+ if (! p)
+ /* short match */
+ return 0;
+
+ if (memcmp(p, s, len) != 0)
+ return 0;
+
+ if (p[len] == '\0')
+ return 1;
+ else if (!memchr(s, '@', len) && (p[len] == '@'))
+ return 1;
+ else
+ return 0;
+}
+
+const char *fdt_string(const void *fdt, int stroffset)
+{
+ return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
+}
+
+static int _fdt_string_eq(const void *fdt, int stroffset,
+ const char *s, int len)
+{
+ const char *p = fdt_string(fdt, stroffset);
+
+ return (strlen(p) == len) && (memcmp(p, s, len) == 0);
+}
+
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
+{
+ FDT_CHECK_HEADER(fdt);
+ *address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
+ *size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
+ return 0;
+}
+
+int fdt_num_mem_rsv(const void *fdt)
+{
+ int i = 0;
+
+ while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
+ i++;
+ return i;
+}
+
+int fdt_subnode_offset_namelen(const void *fdt, int offset,
+ const char *name, int namelen)
+{
+ int depth;
+
+ FDT_CHECK_HEADER(fdt);
+
+ for (depth = 0;
+ (offset >= 0) && (depth >= 0);
+ offset = fdt_next_node(fdt, offset, &depth))
+ if ((depth == 1)
+ && _fdt_nodename_eq(fdt, offset, name, namelen))
+ return offset;
+
+ if (depth < 0)
+ return -FDT_ERR_NOTFOUND;
+ return offset; /* error */
+}
+
+int fdt_sibling_offset_namelen(const void *fdt, int offset,
+ const char *name, int namelen)
+{
+ int depth;
+
+ FDT_CHECK_HEADER(fdt);
+
+ for (depth = 1, offset = fdt_next_node(fdt, offset, &depth);
+ (offset >= 0) && (depth >= 1);
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ if ((depth == 1)
+ && _fdt_nodename_eq(fdt, offset, name, namelen))
+ return offset;
+ }
+
+ if (depth < 0)
+ return -FDT_ERR_NOTFOUND;
+ return offset; /* error */
+}
+
+int fdt_subnode_offset(const void *fdt, int parentoffset,
+ const char *name)
+{
+ return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_sibling_offset(const void *fdt, int offset,
+ const char *name)
+{
+ return fdt_sibling_offset_namelen(fdt, offset, name, strlen(name));
+}
+
+int fdt_path_offset(const void *fdt, const char *path)
+{
+ const char *end = path + strlen(path);
+ const char *p = path;
+ int offset = 0;
+
+ FDT_CHECK_HEADER(fdt);
+
+ /* see if we have an alias */
+ if (*path != '/') {
+ const char *q = strchr(path, '/');
+
+ if (!q)
+ q = end;
+
+ p = fdt_get_alias_namelen(fdt, p, q - p);
+ if (!p)
+ return -FDT_ERR_BADPATH;
+ offset = fdt_path_offset(fdt, p);
+
+ p = q;
+ }
+
+ while (*p) {
+ const char *q;
+
+ while (*p == '/')
+ p++;
+ if (! *p)
+ return offset;
+ q = strchr(p, '/');
+ if (! q)
+ q = end;
+
+ offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
+ if (offset < 0)
+ return offset;
+
+ p = q;
+ }
+
+ return offset;
+}
+
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
+{
+ const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
+ int err;
+
+ if (((err = fdt_check_header(fdt)) != 0)
+ || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
+ goto fail;
+
+ if (len)
+ *len = strlen(nh->name);
+
+ return nh->name;
+
+ fail:
+ if (len)
+ *len = err;
+ return NULL;
+}
+
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+ int nodeoffset,
+ const char *name,
+ int namelen, int *lenp)
+{
+ uint32_t tag;
+ const struct fdt_property *prop;
+ int offset, nextoffset;
+ int err;
+
+ if (((err = fdt_check_header(fdt)) != 0)
+ || ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
+ goto fail;
+
+ nextoffset = err;
+ do {
+ offset = nextoffset;
+
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ switch (tag) {
+ case FDT_END:
+ if (nextoffset < 0)
+ err = nextoffset;
+ else
+ /* FDT_END tag with unclosed nodes */
+ err = -FDT_ERR_BADSTRUCTURE;
+ goto fail;
+
+ case FDT_PROP:
+ prop = _fdt_offset_ptr(fdt, offset);
+ if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff),
+ name, namelen)) {
+ /* Found it! */
+ if (lenp)
+ *lenp = fdt32_to_cpu(prop->len);
+
+ return prop;
+ }
+ break;
+ }
+ } while ((tag != FDT_BEGIN_NODE) && (tag != FDT_END_NODE));
+
+ err = -FDT_ERR_NOTFOUND;
+ fail:
+ if (lenp)
+ *lenp = err;
+ return NULL;
+}
+
+const struct fdt_property *fdt_get_property(const void *fdt,
+ int nodeoffset,
+ const char *name, int *lenp)
+{
+ return fdt_get_property_namelen(fdt, nodeoffset, name,
+ strlen(name), lenp);
+}
+
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+ const char *name, int namelen, int *lenp)
+{
+ const struct fdt_property *prop;
+
+ prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
+ if (! prop)
+ return NULL;
+
+ return prop->data;
+}
+
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+ const char *name, int *lenp)
+{
+ return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
+}
+
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
+{
+ const uint32_t *php;
+ int len;
+
+ php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
+ if (!php || (len != sizeof(*php)))
+ return 0;
+
+ return fdt32_to_cpu(*php);
+}
+
+const char *fdt_get_alias_namelen(const void *fdt,
+ const char *name, int namelen)
+{
+ int aliasoffset;
+
+ aliasoffset = fdt_path_offset(fdt, "/aliases");
+ if (aliasoffset < 0)
+ return NULL;
+
+ return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL);
+}
+
+const char *fdt_get_alias(const void *fdt, const char *name)
+{
+ return fdt_get_alias_namelen(fdt, name, strlen(name));
+}
+
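+/*
+ * Walk every node from the root up to nodeoffset, keeping the path of
+ * the current branch in buf and trimming it whenever the walk steps
+ * back up the tree.
+ */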
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
+{
+ int pdepth = 0, p = 0;
+ int offset, depth, namelen;
+ const char *name;
+
+ FDT_CHECK_HEADER(fdt);
+
+ if (buflen < 2)
+ return -FDT_ERR_NOSPACE;
+
+ for (offset = 0, depth = 0;
+ (offset >= 0) && (offset <= nodeoffset);
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ while (pdepth > depth) {
+ do {
+ p--;
+ } while (buf[p-1] != '/');
+ pdepth--;
+ }
+
+ if (pdepth >= depth) {
+ name = fdt_get_name(fdt, offset, &namelen);
+ if (!name)
+ return namelen;
+ if ((p + namelen + 1) <= buflen) {
+ memcpy(buf + p, name, namelen);
+ p += namelen;
+ buf[p++] = '/';
+ pdepth++;
+ }
+ }
+
+ if (offset == nodeoffset) {
+ if (pdepth < (depth + 1))
+ return -FDT_ERR_NOSPACE;
+
+ if (p > 1) /* special case so that root path is "/", not "" */
+ p--;
+ buf[p] = '\0';
+ return 0;
+ }
+ }
+
+ if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+ return -FDT_ERR_BADOFFSET;
+ else if (offset == -FDT_ERR_BADOFFSET)
+ return -FDT_ERR_BADSTRUCTURE;
+
+ return offset; /* error from fdt_next_node() */
+}
+
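+/*
+ * Find the ancestor of nodeoffset at depth supernodedepth by walking
+ * from the root and remembering the last node seen at that depth.
+ */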
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+ int supernodedepth, int *nodedepth)
+{
+ int offset, depth;
+ int supernodeoffset = -FDT_ERR_INTERNAL;
+
+ FDT_CHECK_HEADER(fdt);
+
+ if (supernodedepth < 0)
+ return -FDT_ERR_NOTFOUND;
+
+ for (offset = 0, depth = 0;
+ (offset >= 0) && (offset <= nodeoffset);
+ offset = fdt_next_node(fdt, offset, &depth)) {
+ if (depth == supernodedepth)
+ supernodeoffset = offset;
+
+ if (offset == nodeoffset) {
+ if (nodedepth)
+ *nodedepth = depth;
+
+ if (supernodedepth > depth)
+ return -FDT_ERR_NOTFOUND;
+ else
+ return supernodeoffset;
+ }
+ }
+
+ if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
+ return -FDT_ERR_BADOFFSET;
+ else if (offset == -FDT_ERR_BADOFFSET)
+ return -FDT_ERR_BADSTRUCTURE;
+
+ return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_depth(const void *fdt, int nodeoffset)
+{
+ int nodedepth;
+ int err;
+
+ err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
+ if (err)
+ return (err < 0) ? err : -FDT_ERR_INTERNAL;
+ return nodedepth;
+}
+
+int fdt_parent_offset(const void *fdt, int nodeoffset)
+{
+ int nodedepth = fdt_node_depth(fdt, nodeoffset);
+
+ if (nodedepth < 0)
+ return nodedepth;
+ return fdt_supernode_atdepth_offset(fdt, nodeoffset,
+ nodedepth - 1, NULL);
+}
+
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+ const char *propname,
+ const void *propval, int proplen)
+{
+ int offset;
+ const void *val;
+ int len;
+
+ FDT_CHECK_HEADER(fdt);
+
+ /* FIXME: The algorithm here is pretty horrible: we scan each
+ * property of a node in fdt_getprop(), then if that didn't
+ * find what we want, we scan over them again making our way
+ * to the next node. Still it's the easiest to implement
+ * approach; performance can come later. */
+ for (offset = fdt_next_node(fdt, startoffset, NULL);
+ offset >= 0;
+ offset = fdt_next_node(fdt, offset, NULL)) {
+ val = fdt_getprop(fdt, offset, propname, &len);
+ if (val && (len == proplen)
+ && (memcmp(val, propval, len) == 0))
+ return offset;
+ }
+
+ return offset; /* error from fdt_next_node() */
+}
+
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
+{
+ if ((phandle == 0) || (phandle == -1))
+ return -FDT_ERR_BADPHANDLE;
+ phandle = cpu_to_fdt32(phandle);
+ return fdt_node_offset_by_prop_value(fdt, -1, "linux,phandle",
+ &phandle, sizeof(phandle));
+}
+
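+/* Search a NUL-separated list of strings (listlen bytes) for str */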
+static int _fdt_stringlist_contains(const char *strlist, int listlen,
+ const char *str)
+{
+ int len = strlen(str);
+ const char *p;
+
+ while (listlen >= len) {
+ if (memcmp(str, strlist, len+1) == 0)
+ return 1;
+ p = memchr(strlist, '\0', listlen);
+ if (!p)
+ return 0; /* malformed strlist.. */
+ listlen -= (p-strlist) + 1;
+ strlist = p + 1;
+ }
+ return 0;
+}
+
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+ const char *compatible)
+{
+ const void *prop;
+ int len;
+
+ prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
+ if (!prop)
+ return len;
+ if (_fdt_stringlist_contains(prop, len, compatible))
+ return 0;
+ else
+ return 1;
+}
+
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+ const char *compatible)
+{
+ int offset, err;
+
+ FDT_CHECK_HEADER(fdt);
+
+ /* FIXME: The algorithm here is pretty horrible: we scan each
+ * property of a node in fdt_node_check_compatible(), then if
+ * that didn't find what we want, we scan over them again
+ * making our way to the next node. Still it's the easiest to
+ * implement approach; performance can come later. */
+ for (offset = fdt_next_node(fdt, startoffset, NULL);
+ offset >= 0;
+ offset = fdt_next_node(fdt, offset, NULL)) {
+ err = fdt_node_check_compatible(fdt, offset, compatible);
+ if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
+ return err;
+ else if (err == 0)
+ return offset;
+ }
+
+ return offset; /* error from fdt_next_node() */
+}
diff --git a/libfdt/fdt_rw.c b/libfdt/fdt_rw.c
new file mode 100644
index 0000000..994037b
--- /dev/null
+++ b/libfdt/fdt_rw.c
@@ -0,0 +1,465 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
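+/*
+ * Check that the memreserve, structure and strings blocks appear in
+ * the canonical order and packing that in-place editing relies on.
+ */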
+static int _fdt_blocks_misordered(const void *fdt,
+ int mem_rsv_size, int struct_size)
+{
+ return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8))
+ || (fdt_off_dt_struct(fdt) <
+ (fdt_off_mem_rsvmap(fdt) + mem_rsv_size))
+ || (fdt_off_dt_strings(fdt) <
+ (fdt_off_dt_struct(fdt) + struct_size))
+ || (fdt_totalsize(fdt) <
+ (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
+}
+
+static int _fdt_rw_check_header(void *fdt)
+{
+ FDT_CHECK_HEADER(fdt);
+
+ if (fdt_version(fdt) < 17)
+ return -FDT_ERR_BADVERSION;
+ if (_fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry),
+ fdt_size_dt_struct(fdt)))
+ return -FDT_ERR_BADLAYOUT;
+ if (fdt_version(fdt) > 17)
+ fdt_set_version(fdt, 17);
+
+ return 0;
+}
+
+#define FDT_RW_CHECK_HEADER(fdt) \
+ { \
+ int err; \
+ if ((err = _fdt_rw_check_header(fdt)) != 0) \
+ return err; \
+ }
+
+static inline int _fdt_data_size(void *fdt)
+{
+ return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+}
+
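+/*
+ * Resize the oldlen bytes at splicepoint to newlen by shifting the
+ * tail of the blob; callers then fix up the affected offset/size
+ * fields in the header.
+ */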
+static int _fdt_splice(void *fdt, void *splicepoint, int oldlen, int newlen)
+{
+ char *p = splicepoint;
+ char *end = (char *)fdt + _fdt_data_size(fdt);
+
+ if (((p + oldlen) < p) || ((p + oldlen) > end))
+ return -FDT_ERR_BADOFFSET;
+ if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt)))
+ return -FDT_ERR_NOSPACE;
+ memmove(p + newlen, p + oldlen, end - p - oldlen);
+ return 0;
+}
+
+static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p,
+ int oldn, int newn)
+{
+ int delta = (newn - oldn) * sizeof(*p);
+ int err;
+ err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
+ if (err)
+ return err;
+ fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta);
+ fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+ return 0;
+}
+
+static int _fdt_splice_struct(void *fdt, void *p,
+ int oldlen, int newlen)
+{
+ int delta = newlen - oldlen;
+ int err;
+
+ if ((err = _fdt_splice(fdt, p, oldlen, newlen)))
+ return err;
+
+ fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta);
+ fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
+ return 0;
+}
+
+static int _fdt_splice_string(void *fdt, int newlen)
+{
+ void *p = (char *)fdt
+ + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
+ int err;
+
+ if ((err = _fdt_splice(fdt, p, 0, newlen)))
+ return err;
+
+ fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen);
+ return 0;
+}
+
+static int _fdt_find_add_string(void *fdt, const char *s)
+{
+ char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
+ const char *p;
+ char *new;
+ int len = strlen(s) + 1;
+ int err;
+
+ p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s);
+ if (p)
+ /* found it */
+ return (p - strtab);
+
+ new = strtab + fdt_size_dt_strings(fdt);
+ err = _fdt_splice_string(fdt, len);
+ if (err)
+ return err;
+
+ memcpy(new, s, len);
+ return (new - strtab);
+}
+
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
+{
+ struct fdt_reserve_entry *re;
+ int err;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt));
+ err = _fdt_splice_mem_rsv(fdt, re, 0, 1);
+ if (err)
+ return err;
+
+ re->address = cpu_to_fdt64(address);
+ re->size = cpu_to_fdt64(size);
+ return 0;
+}
+
+int fdt_del_mem_rsv(void *fdt, int n)
+{
+ struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n);
+ int err;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ if (n >= fdt_num_mem_rsv(fdt))
+ return -FDT_ERR_NOTFOUND;
+
+ err = _fdt_splice_mem_rsv(fdt, re, 1, 0);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name,
+ int len, struct fdt_property **prop)
+{
+ int oldlen;
+ int err;
+
+ *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
+ if (! (*prop))
+ return oldlen;
+
+ if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen),
+ FDT_TAGALIGN(len))))
+ return err;
+
+ (*prop)->len = cpu_to_fdt32(len);
+ return 0;
+}
+
+static int _fdt_add_property(void *fdt, int nodeoffset, const char *name,
+ int len, struct fdt_property **prop)
+{
+ int proplen;
+ int nextoffset;
+ int namestroff;
+ int err;
+
+ if ((nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
+ return nextoffset;
+
+ namestroff = _fdt_find_add_string(fdt, name);
+ if (namestroff < 0)
+ return namestroff;
+
+ *prop = _fdt_offset_ptr_w(fdt, nextoffset);
+ proplen = sizeof(**prop) + FDT_TAGALIGN(len);
+
+ err = _fdt_splice_struct(fdt, *prop, 0, proplen);
+ if (err)
+ return err;
+
+ (*prop)->tag = cpu_to_fdt32(FDT_PROP);
+ (*prop)->nameoff = cpu_to_fdt32(namestroff);
+ (*prop)->len = cpu_to_fdt32(len);
+ return 0;
+}
+
+int fdt_set_name(void *fdt, int nodeoffset, const char *name)
+{
+ char *namep;
+ int oldlen, newlen;
+ int err;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen);
+ if (!namep)
+ return oldlen;
+
+ newlen = strlen(name);
+
+ err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen+1),
+ FDT_TAGALIGN(newlen+1));
+ if (err)
+ return err;
+
+ memcpy(namep, name, newlen+1);
+ return 0;
+}
+
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ struct fdt_property *prop;
+ int err;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop);
+ if (err == -FDT_ERR_NOTFOUND)
+ err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
+ if (err)
+ return err;
+
+ memcpy(prop->data, val, len);
+ return 0;
+}
+
+int fdt_delprop(void *fdt, int nodeoffset, const char *name)
+{
+ struct fdt_property *prop;
+ int len, proplen;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+ if (! prop)
+ return len;
+
+ proplen = sizeof(*prop) + FDT_TAGALIGN(len);
+ return _fdt_splice_struct(fdt, prop, proplen, 0);
+}
+
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+ const char *name, int namelen)
+{
+ struct fdt_node_header *nh;
+ int offset, nextoffset;
+ int nodelen;
+ int err;
+ uint32_t tag;
+ uint32_t *endtag;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
+ if (offset >= 0)
+ return -FDT_ERR_EXISTS;
+ else if (offset != -FDT_ERR_NOTFOUND)
+ return offset;
+
+ /* Try to place the new node after the parent's properties */
+ fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
+ do {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+ } while ((tag == FDT_PROP) || (tag == FDT_NOP));
+
+ nh = _fdt_offset_ptr_w(fdt, offset);
+ nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE;
+
+ err = _fdt_splice_struct(fdt, nh, 0, nodelen);
+ if (err)
+ return err;
+
+ nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+ memset(nh->name, 0, FDT_TAGALIGN(namelen+1));
+ memcpy(nh->name, name, namelen);
+ endtag = (uint32_t *)((char *)nh + nodelen - FDT_TAGSIZE);
+ *endtag = cpu_to_fdt32(FDT_END_NODE);
+
+ return offset;
+}
+
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name)
+{
+ return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name));
+}
+
+int fdt_del_node(void *fdt, int nodeoffset)
+{
+ int endoffset;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+ if (endoffset < 0)
+ return endoffset;
+
+ return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset),
+ endoffset - nodeoffset, 0);
+}
+
+static void _fdt_packblocks(const char *old, char *new,
+ int mem_rsv_size, int struct_size)
+{
+ int mem_rsv_off, struct_off, strings_off;
+
+ mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
+ struct_off = mem_rsv_off + mem_rsv_size;
+ strings_off = struct_off + struct_size;
+
+ memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size);
+ fdt_set_off_mem_rsvmap(new, mem_rsv_off);
+
+ memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size);
+ fdt_set_off_dt_struct(new, struct_off);
+ fdt_set_size_dt_struct(new, struct_size);
+
+ memmove(new + strings_off, old + fdt_off_dt_strings(old),
+ fdt_size_dt_strings(old));
+ fdt_set_off_dt_strings(new, strings_off);
+ fdt_set_size_dt_strings(new, fdt_size_dt_strings(old));
+}
+
+int fdt_open_into(const void *fdt, void *buf, int bufsize)
+{
+ int err;
+ int mem_rsv_size, struct_size;
+ int newsize;
+ const char *fdtstart = fdt;
+ const char *fdtend = fdtstart + fdt_totalsize(fdt);
+ char *tmp;
+
+ FDT_CHECK_HEADER(fdt);
+
+ mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+ * sizeof(struct fdt_reserve_entry);
+
+ if (fdt_version(fdt) >= 17) {
+ struct_size = fdt_size_dt_struct(fdt);
+ } else {
+ struct_size = 0;
+ while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
+ ;
+ if (struct_size < 0)
+ return struct_size;
+ }
+
+ if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
+ /* no further work necessary */
+ err = fdt_move(fdt, buf, bufsize);
+ if (err)
+ return err;
+ fdt_set_version(buf, 17);
+ fdt_set_size_dt_struct(buf, struct_size);
+ fdt_set_totalsize(buf, bufsize);
+ return 0;
+ }
+
+ /* Need to reorder */
+ newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
+ + struct_size + fdt_size_dt_strings(fdt);
+
+ if (bufsize < newsize)
+ return -FDT_ERR_NOSPACE;
+
+ /* First attempt to build converted tree at beginning of buffer */
+ tmp = buf;
+ /* But if that overlaps with the old tree... */
+ if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) {
+ /* Try right after the old tree instead */
+ tmp = (char *)(uintptr_t)fdtend;
+ if ((tmp + newsize) > ((char *)buf + bufsize))
+ return -FDT_ERR_NOSPACE;
+ }
+
+ _fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size);
+ memmove(buf, tmp, newsize);
+
+ fdt_set_magic(buf, FDT_MAGIC);
+ fdt_set_totalsize(buf, bufsize);
+ fdt_set_version(buf, 17);
+ fdt_set_last_comp_version(buf, 16);
+ fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt));
+
+ return 0;
+}
+
+int fdt_pack(void *fdt)
+{
+ int mem_rsv_size;
+
+ FDT_RW_CHECK_HEADER(fdt);
+
+ mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
+ * sizeof(struct fdt_reserve_entry);
+ _fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
+ fdt_set_totalsize(fdt, _fdt_data_size(fdt));
+
+ return 0;
+}
diff --git a/libfdt/fdt_strerror.c b/libfdt/fdt_strerror.c
new file mode 100644
index 0000000..0538a8e
--- /dev/null
+++ b/libfdt/fdt_strerror.c
@@ -0,0 +1,96 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compiler.h>
+#include "libfdt_env.h"
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+struct fdt_errtabent {
+ const char *str;
+};
+
+#define FDT_ERRTABENT(val) \
+ [(val)] = { .str = #val, }
+
+static struct fdt_errtabent fdt_errtable[] = {
+ FDT_ERRTABENT(FDT_ERR_NOTFOUND),
+ FDT_ERRTABENT(FDT_ERR_EXISTS),
+ FDT_ERRTABENT(FDT_ERR_NOSPACE),
+
+ FDT_ERRTABENT(FDT_ERR_BADOFFSET),
+ FDT_ERRTABENT(FDT_ERR_BADPATH),
+ FDT_ERRTABENT(FDT_ERR_BADSTATE),
+
+ FDT_ERRTABENT(FDT_ERR_TRUNCATED),
+ FDT_ERRTABENT(FDT_ERR_BADMAGIC),
+ FDT_ERRTABENT(FDT_ERR_BADVERSION),
+ FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE),
+ FDT_ERRTABENT(FDT_ERR_BADLAYOUT),
+};
+#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0]))
+
+const char __attrconst *fdt_strerror(int errval)
+{
+ if (errval > 0)
+ return "<valid offset/length>";
+ else if (errval == 0)
+ return "<no error>";
+ else if (errval > -FDT_ERRTABSIZE) {
+ const char *s = fdt_errtable[-errval].str;
+
+ if (s)
+ return s;
+ }
+
+ return "<unknown error>";
+}
diff --git a/libfdt/fdt_sw.c b/libfdt/fdt_sw.c
new file mode 100644
index 0000000..784b672
--- /dev/null
+++ b/libfdt/fdt_sw.c
@@ -0,0 +1,280 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+static int _fdt_sw_check_header(void *fdt)
+{
+ if (fdt_magic(fdt) != FDT_SW_MAGIC)
+ return -FDT_ERR_BADMAGIC;
+ /* FIXME: should check more details about the header state */
+ return 0;
+}
+
+#define FDT_SW_CHECK_HEADER(fdt) \
+ { \
+ int err; \
+ if ((err = _fdt_sw_check_header(fdt)) != 0) \
+ return err; \
+ }
+
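+/*
+ * Extend the structure block by len bytes, failing if that would run
+ * into the string table growing down from the end of the buffer.
+ */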
+static void *_fdt_grab_space(void *fdt, size_t len)
+{
+ int offset = fdt_size_dt_struct(fdt);
+ int spaceleft;
+
+ spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
+ - fdt_size_dt_strings(fdt);
+
+ if ((offset + len < offset) || (offset + len > spaceleft))
+ return NULL;
+
+ fdt_set_size_dt_struct(fdt, offset + len);
+ return _fdt_offset_ptr_w(fdt, offset);
+}
+
+int fdt_create(void *buf, int bufsize)
+{
+ void *fdt = buf;
+
+ if (bufsize < sizeof(struct fdt_header))
+ return -FDT_ERR_NOSPACE;
+
+ memset(buf, 0, bufsize);
+
+ fdt_set_magic(fdt, FDT_SW_MAGIC);
+ fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
+ fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
+ fdt_set_totalsize(fdt, bufsize);
+
+ fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header),
+ sizeof(struct fdt_reserve_entry)));
+ fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
+ fdt_set_off_dt_strings(fdt, bufsize);
+
+ return 0;
+}
+
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
+{
+ struct fdt_reserve_entry *re;
+ int offset;
+
+ FDT_SW_CHECK_HEADER(fdt);
+
+ if (fdt_size_dt_struct(fdt))
+ return -FDT_ERR_BADSTATE;
+
+ offset = fdt_off_dt_struct(fdt);
+ if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
+ return -FDT_ERR_NOSPACE;
+
+ re = (struct fdt_reserve_entry *)((char *)fdt + offset);
+ re->address = cpu_to_fdt64(addr);
+ re->size = cpu_to_fdt64(size);
+
+ fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
+
+ return 0;
+}
+
+int fdt_finish_reservemap(void *fdt)
+{
+ return fdt_add_reservemap_entry(fdt, 0, 0);
+}
+
+int fdt_begin_node(void *fdt, const char *name)
+{
+ struct fdt_node_header *nh;
+ int namelen = strlen(name) + 1;
+
+ FDT_SW_CHECK_HEADER(fdt);
+
+ nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen));
+ if (! nh)
+ return -FDT_ERR_NOSPACE;
+
+ nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
+ memcpy(nh->name, name, namelen);
+ return 0;
+}
+
+int fdt_end_node(void *fdt)
+{
+ uint32_t *en;
+
+ FDT_SW_CHECK_HEADER(fdt);
+
+ en = _fdt_grab_space(fdt, FDT_TAGSIZE);
+ if (! en)
+ return -FDT_ERR_NOSPACE;
+
+ *en = cpu_to_fdt32(FDT_END_NODE);
+ return 0;
+}
+
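+/*
+ * During sequential write the string table grows downwards from the
+ * end of the buffer, so offsets returned here are negative; they are
+ * rebased by fdt_finish() once the table's final position is known.
+ */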
+static int _fdt_find_add_string(void *fdt, const char *s)
+{
+ char *strtab = (char *)fdt + fdt_totalsize(fdt);
+ const char *p;
+ int strtabsize = fdt_size_dt_strings(fdt);
+ int len = strlen(s) + 1;
+ int struct_top, offset;
+
+ p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
+ if (p)
+ return p - strtab;
+
+ /* Add it */
+ offset = -strtabsize - len;
+ struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+ if (fdt_totalsize(fdt) + offset < struct_top)
+ return 0; /* no more room :( */
+
+ memcpy(strtab + offset, s, len);
+ fdt_set_size_dt_strings(fdt, strtabsize + len);
+ return offset;
+}
+
+int fdt_property(void *fdt, const char *name, const void *val, int len)
+{
+ struct fdt_property *prop;
+ int nameoff;
+
+ FDT_SW_CHECK_HEADER(fdt);
+
+ nameoff = _fdt_find_add_string(fdt, name);
+ if (nameoff == 0)
+ return -FDT_ERR_NOSPACE;
+
+ prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len));
+ if (! prop)
+ return -FDT_ERR_NOSPACE;
+
+ prop->tag = cpu_to_fdt32(FDT_PROP);
+ prop->nameoff = cpu_to_fdt32(nameoff);
+ prop->len = cpu_to_fdt32(len);
+ memcpy(prop->data, val, len);
+ return 0;
+}
+
+int fdt_property_cells_v(void *fdt, const char *name, int count,
+ va_list args)
+{
+ uint32_t buffer[count];
+ int i;
+
+ for (i = 0; i < count; i++)
+ buffer[i] = cpu_to_fdt32(va_arg(args, uint32_t));
+
+ return fdt_property(fdt, name, buffer, sizeof(buffer));
+}
+
+int fdt_property_cells(void *fdt, const char *name, int count, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, count);
+ ret = fdt_property_cells_v(fdt, name, count, args);
+ va_end(args);
+
+ return ret;
+}
+
+int fdt_finish(void *fdt)
+{
+ char *p = (char *)fdt;
+ uint32_t *end;
+ int oldstroffset, newstroffset;
+ uint32_t tag;
+ int offset, nextoffset;
+
+ FDT_SW_CHECK_HEADER(fdt);
+
+ /* Add terminator */
+ end = _fdt_grab_space(fdt, sizeof(*end));
+ if (! end)
+ return -FDT_ERR_NOSPACE;
+ *end = cpu_to_fdt32(FDT_END);
+
+ /* Relocate the string table */
+ oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
+ newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
+ memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
+ fdt_set_off_dt_strings(fdt, newstroffset);
+
+ /* Walk the structure, correcting string offsets */
+ offset = 0;
+ while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
+ if (tag == FDT_PROP) {
+ struct fdt_property *prop =
+ _fdt_offset_ptr_w(fdt, offset);
+ int nameoff;
+
+ nameoff = fdt32_to_cpu(prop->nameoff);
+ nameoff += fdt_size_dt_strings(fdt);
+ prop->nameoff = cpu_to_fdt32(nameoff);
+ }
+ offset = nextoffset;
+ }
+ if (nextoffset < 0)
+ return nextoffset;
+
+ /* Finally, adjust the header */
+ fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
+ fdt_set_magic(fdt, FDT_MAGIC);
+ return 0;
+}
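The sequential-write entry points above are used in a fixed order: fdt_create(), the reserve map, then nested begin/end node calls, and finally fdt_finish(). A minimal sketch of that flow (buffer size, node and property names are illustrative only, not part of this patch):

#include <libfdt.h>

static int build_example_tree(void *buf, int bufsize)
{
	int err;

	if ((err = fdt_create(buf, bufsize)) != 0)
		return err;
	/* terminate the (empty) memory reserve map */
	if ((err = fdt_finish_reservemap(buf)) != 0)
		return err;
	if ((err = fdt_begin_node(buf, "")) != 0)	/* root node */
		return err;
	if ((err = fdt_property_string(buf, "model", "example,board")) != 0)
		return err;
	if ((err = fdt_property_cell(buf, "#address-cells", 1)) != 0)
		return err;
	if ((err = fdt_end_node(buf)) != 0)
		return err;
	/* appends FDT_END, relocates the string table, sets FDT_MAGIC */
	return fdt_finish(buf);
}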
diff --git a/libfdt/fdt_wip.c b/libfdt/fdt_wip.c
new file mode 100644
index 0000000..6025fa1
--- /dev/null
+++ b/libfdt/fdt_wip.c
@@ -0,0 +1,118 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ void *propval;
+ int proplen;
+
+ propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen);
+ if (! propval)
+ return proplen;
+
+ if (proplen != len)
+ return -FDT_ERR_NOSPACE;
+
+ memcpy(propval, val, len);
+ return 0;
+}
+
+static void _fdt_nop_region(void *start, int len)
+{
+ uint32_t *p;
+
+ for (p = start; (char *)p < ((char *)start + len); p++)
+ *p = cpu_to_fdt32(FDT_NOP);
+}
+
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
+{
+ struct fdt_property *prop;
+ int len;
+
+ prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
+ if (! prop)
+ return len;
+
+ _fdt_nop_region(prop, len + sizeof(*prop));
+
+ return 0;
+}
+
+int _fdt_node_end_offset(void *fdt, int offset)
+{
+ int depth = 0;
+
+ while ((offset >= 0) && (depth >= 0))
+ offset = fdt_next_node(fdt, offset, &depth);
+
+ return offset;
+}
+
+int fdt_nop_node(void *fdt, int nodeoffset)
+{
+ int endoffset;
+
+ endoffset = _fdt_node_end_offset(fdt, nodeoffset);
+ if (endoffset < 0)
+ return endoffset;
+
+ _fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0),
+ endoffset - nodeoffset);
+ return 0;
+}
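The write-in-place helpers above never move data, so node offsets obtained earlier stay valid. A short sketch of their use (the path and property names are examples, not from this patch):

#include <libfdt.h>

static int patch_in_place(void *fdt)
{
	uint32_t freq = cpu_to_fdt32(512000000);
	int offset, err;

	offset = fdt_path_offset(fdt, "/cpus/cpu@0");
	if (offset < 0)
		return offset;

	/* only succeeds if the new value has the property's current length */
	err = fdt_setprop_inplace(fdt, offset, "clock-frequency",
				  &freq, sizeof(freq));
	if (err)
		return err;

	offset = fdt_path_offset(fdt, "/chosen");
	if (offset < 0)
		return offset;
	/* overwrite the whole subtree with FDT_NOP tags */
	return fdt_nop_node(fdt, offset);
}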
diff --git a/libfdt/libfdt.h b/libfdt/libfdt.h
new file mode 100644
index 0000000..0035bf7
--- /dev/null
+++ b/libfdt/libfdt.h
@@ -0,0 +1,1168 @@
+#ifndef _LIBFDT_H
+#define _LIBFDT_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <libfdt_env.h>
+#include <fdt.h>
+
+#define FDT_FIRST_SUPPORTED_VERSION 0x10
+#define FDT_LAST_SUPPORTED_VERSION 0x11
+
+/* Error codes: informative error codes */
+#define FDT_ERR_NOTFOUND 1
+ /* FDT_ERR_NOTFOUND: The requested node or property does not exist */
+#define FDT_ERR_EXISTS 2
+	/* FDT_ERR_EXISTS: Attempted to create a node or property which
+ * already exists */
+#define FDT_ERR_NOSPACE 3
+ /* FDT_ERR_NOSPACE: Operation needed to expand the device
+ * tree, but its buffer did not have sufficient space to
+ * contain the expanded tree. Use fdt_open_into() to move the
+ * device tree to a buffer with more space. */
+
+/* Error codes: codes for bad parameters */
+#define FDT_ERR_BADOFFSET 4
+ /* FDT_ERR_BADOFFSET: Function was passed a structure block
+ * offset which is out-of-bounds, or which points to an
+ * unsuitable part of the structure for the operation. */
+#define FDT_ERR_BADPATH 5
+ /* FDT_ERR_BADPATH: Function was passed a badly formatted path
+ * (e.g. missing a leading / for a function which requires an
+ * absolute path) */
+#define FDT_ERR_BADPHANDLE 6
+ /* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle
+ * value. phandle values of 0 and -1 are not permitted. */
+#define FDT_ERR_BADSTATE 7
+ /* FDT_ERR_BADSTATE: Function was passed an incomplete device
+ * tree created by the sequential-write functions, which is
+ * not sufficiently complete for the requested operation. */
+
+/* Error codes: codes for bad device tree blobs */
+#define FDT_ERR_TRUNCATED 8
+ /* FDT_ERR_TRUNCATED: Structure block of the given device tree
+ * ends without an FDT_END tag. */
+#define FDT_ERR_BADMAGIC 9
+ /* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
+ * device tree at all - it is missing the flattened device
+ * tree magic number. */
+#define FDT_ERR_BADVERSION 10
+ /* FDT_ERR_BADVERSION: Given device tree has a version which
+ * can't be handled by the requested operation. For
+ * read-write functions, this may mean that fdt_open_into() is
+ * required to convert the tree to the expected version. */
+#define FDT_ERR_BADSTRUCTURE 11
+ /* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt
+ * structure block or other serious error (e.g. misnested
+ * nodes, or subnodes preceding properties). */
+#define FDT_ERR_BADLAYOUT 12
+ /* FDT_ERR_BADLAYOUT: For read-write functions, the given
+ * device tree has its sub-blocks in an order that the
+ * function can't handle (memory reserve map, then structure,
+ * then strings). Use fdt_open_into() to reorganize the tree
+ * into a form suitable for the read-write operations. */
+
+/* "Can't happen" error indicating a bug in libfdt */
+#define FDT_ERR_INTERNAL 13
+ /* FDT_ERR_INTERNAL: libfdt has failed an internal assertion.
+ * Should never be returned, if it is, it indicates a bug in
+ * libfdt itself. */
+
+#define FDT_ERR_MAX 13
+
+/**********************************************************************/
+/* Low-level functions (you probably don't need these) */
+/**********************************************************************/
+
+const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen);
+static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
+{
+ return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen);
+}
+
+uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
+
+/**********************************************************************/
+/* Traversal functions */
+/**********************************************************************/
+
+int fdt_next_node(const void *fdt, int offset, int *depth);
+
+/**********************************************************************/
+/* General functions */
+/**********************************************************************/
+
+#define fdt_get_header(fdt, field) \
+ (fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
+#define fdt_magic(fdt) (fdt_get_header(fdt, magic))
+#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize))
+#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct))
+#define fdt_off_dt_strings(fdt) (fdt_get_header(fdt, off_dt_strings))
+#define fdt_off_mem_rsvmap(fdt) (fdt_get_header(fdt, off_mem_rsvmap))
+#define fdt_version(fdt) (fdt_get_header(fdt, version))
+#define fdt_last_comp_version(fdt) (fdt_get_header(fdt, last_comp_version))
+#define fdt_boot_cpuid_phys(fdt) (fdt_get_header(fdt, boot_cpuid_phys))
+#define fdt_size_dt_strings(fdt) (fdt_get_header(fdt, size_dt_strings))
+#define fdt_size_dt_struct(fdt) (fdt_get_header(fdt, size_dt_struct))
+
+#define __fdt_set_hdr(name) \
+ static inline void fdt_set_##name(void *fdt, uint32_t val) \
+ { \
+ struct fdt_header *fdth = (struct fdt_header*)fdt; \
+ fdth->name = cpu_to_fdt32(val); \
+ }
+__fdt_set_hdr(magic);
+__fdt_set_hdr(totalsize);
+__fdt_set_hdr(off_dt_struct);
+__fdt_set_hdr(off_dt_strings);
+__fdt_set_hdr(off_mem_rsvmap);
+__fdt_set_hdr(version);
+__fdt_set_hdr(last_comp_version);
+__fdt_set_hdr(boot_cpuid_phys);
+__fdt_set_hdr(size_dt_strings);
+__fdt_set_hdr(size_dt_struct);
+#undef __fdt_set_hdr
+
+/**
+ * fdt_check_header - sanity check a device tree or possible device tree
+ * @fdt: pointer to data which might be a flattened device tree
+ *
+ * fdt_check_header() checks that the given buffer contains what
+ * appears to be a flattened device tree with sane information in its
+ * header.
+ *
+ * returns:
+ * 0, if the buffer appears to contain a valid device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings, as above
+ */
+int fdt_check_header(const void *fdt);
+
+/**
+ * fdt_move - move a device tree around in memory
+ * @fdt: pointer to the device tree to move
+ * @buf: pointer to memory where the device tree is to be moved
+ * @bufsize: size of the memory space at buf
+ *
+ * fdt_move() relocates, if possible, the device tree blob located at
+ * fdt to the buffer at buf of size bufsize. The buffer may overlap
+ * with the existing device tree blob at fdt. Therefore,
+ * fdt_move(fdt, fdt, fdt_totalsize(fdt))
+ * should always succeed.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_move(const void *fdt, void *buf, int bufsize);
+
+/**********************************************************************/
+/* Read-only functions */
+/**********************************************************************/
+
+/**
+ * fdt_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ *
+ * fdt_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt.
+ *
+ * returns:
+ * a pointer to the string, on success
+ * NULL, if stroffset is out of bounds
+ */
+const char *fdt_string(const void *fdt, int stroffset);
+
+/**
+ * fdt_num_mem_rsv - retrieve the number of memory reserve map entries
+ * @fdt: pointer to the device tree blob
+ *
+ * Returns the number of entries in the device tree blob's memory
+ * reservation map. This does not include the terminating 0,0 entry
+ * or any other (0,0) entries reserved for expansion.
+ *
+ * returns:
+ * the number of entries
+ */
+int fdt_num_mem_rsv(const void *fdt);
+
+/**
+ * fdt_get_mem_rsv - retrieve one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: index of the reserve map entry to retrieve
+ * @address, @size: pointers to 64-bit variables
+ *
+ * On success, *address and *size will contain the address and size of
+ * the n-th reserve map entry from the device tree blob, in
+ * native-endian format.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
+
+/**
+ * fdt_subnode_offset_namelen - find a subnode based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_subnode_offset(), but only examine the first
+ * namelen characters of name for matching the subnode name. This is
+ * useful for finding subnodes based on a portion of a larger string,
+ * such as a full path.
+ */
+int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
+ const char *name, int namelen);
+/**
+ * fdt_sibling_offset_namelen - find sibling node based on substring
+ * @fdt: pointer to the device tree blob
+ * @fromoffset: node to start from
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Typically used to continue the search started with
+ * fdt_subnode_offset_namelen() using the same matching rules.
+ */
+int fdt_sibling_offset_namelen(const void *fdt, int fromoffset,
+ const char *name, int namelen);
+/**
+ * fdt_subnode_offset - find a subnode of a given node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_subnode_offset() finds a subnode of the node at structure block
+ * offset parentoffset with the given name. name may include a unit
+ * address, in which case fdt_subnode_offset() will find the subnode
+ * with that unit address, or the unit address may be omitted, in
+ * which case fdt_subnode_offset() will find an arbitrary subnode
+ * whose name excluding unit address matches the given name.
+ *
+ * returns:
+ * structure block offset of the requested subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_sibling_offset - find a sibling of a given node by name
+ * @fdt: pointer to the device tree blob
+ * @fromoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * Typically used to continue the search started with fdt_subnode_offset()
+ * using the same matching rules.
+ *
+ * returns:
+ * structure block offset of the requested subnode (>=0), on success
+ * -FDT_ERR_NOTFOUND, if the requested subnode does not exist
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_sibling_offset(const void *fdt, int fromoffset, const char *name);
+
+/**
+ * fdt_path_offset - find a tree node by its full path
+ * @fdt: pointer to the device tree blob
+ * @path: full path of the node to locate
+ *
+ * fdt_path_offset() finds a node of a given path in the device tree.
+ * Each path component may omit the unit address portion, but the
+ * results of this are undefined if any such path component is
+ * ambiguous (that is if there are multiple nodes at the relevant
+ * level matching the given component, differentiated only by unit
+ * address).
+ *
+ * returns:
+ * structure block offset of the node with the requested path (>=0), on success
+ * -FDT_ERR_BADPATH, given path does not begin with '/' or is invalid
+ * -FDT_ERR_NOTFOUND, if the requested node does not exist
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_path_offset(const void *fdt, const char *path);
+
+/**
+ * fdt_get_name - retrieve the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the starting node
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_name() retrieves the name (including unit address) of the
+ * device tree node at structure block offset nodeoffset. If lenp is
+ * non-NULL, the length of this name is also returned, in the integer
+ * pointed to by lenp.
+ *
+ * returns:
+ * pointer to the node's name, on success
+ * If lenp is non-NULL, *lenp contains the length of that name (>=0)
+ * NULL, on error
+ * if lenp is non-NULL *lenp contains an error code (<0):
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp);
+
+/**
+ * fdt_get_property_namelen - find a property based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_get_property(), but only examine the first
+ * namelen characters of name for matching the property name.
+ */
+const struct fdt_property *fdt_get_property_namelen(const void *fdt,
+ int nodeoffset,
+ const char *name,
+ int namelen, int *lenp);
+
+/**
+ * fdt_get_property - find a given property in a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_get_property() retrieves a pointer to the fdt_property
+ * structure within the device tree blob corresponding to the property
+ * named 'name' of the node at offset nodeoffset. If lenp is
+ * non-NULL, the length of the property value is also returned, in the
+ * integer pointed to by lenp.
+ *
+ * returns:
+ * pointer to the structure representing the property
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
+ const char *name,
+ int *lenp)
+{
+ return (struct fdt_property *)(uintptr_t)
+ fdt_get_property(fdt, nodeoffset, name, lenp);
+}
+
+/**
+ * fdt_getprop_namelen - get property value based on substring
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @namelen: number of characters of name to consider
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * Identical to fdt_getprop(), but only examine the first namelen
+ * characters of name for matching the property name.
+ */
+const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
+ const char *name, int namelen, int *lenp);
+
+/**
+ * fdt_getprop - retrieve the value of a given property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to find
+ * @name: name of the property to find
+ * @lenp: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_getprop() retrieves a pointer to the value of the property
+ * named 'name' of the node at offset nodeoffset (this will be a
+ * pointer to within the device blob itself, not a copy of the value).
+ * If lenp is non-NULL, the length of the property value is also
+ * returned, in the integer pointed to by lenp.
+ *
+ * returns:
+ * pointer to the property's value
+ * if lenp is non-NULL, *lenp contains the length of the property
+ * value (>=0)
+ * NULL, on error
+ * if lenp is non-NULL, *lenp contains an error code (<0):
+ * -FDT_ERR_NOTFOUND, node does not have named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+const void *fdt_getprop(const void *fdt, int nodeoffset,
+ const char *name, int *lenp);
+static inline void *fdt_getprop_w(void *fdt, int nodeoffset,
+ const char *name, int *lenp)
+{
+ return (void *)(uintptr_t)fdt_getprop(fdt, nodeoffset, name, lenp);
+}
+
+/**
+ * fdt_get_phandle - retrieve the phandle of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of the node
+ *
+ * fdt_get_phandle() retrieves the phandle of the device tree node at
+ * structure block offset nodeoffset.
+ *
+ * returns:
+ * the phandle of the node at nodeoffset, on success (!= 0, != -1)
+ * 0, if the node has no phandle, or another error occurs
+ */
+uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_get_alias_namelen - get alias based on substring
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_get_alias(), but only examine the first namelen
+ * characters of name for matching the alias name.
+ */
+const char *fdt_get_alias_namelen(const void *fdt,
+ const char *name, int namelen);
+
+/**
+ * fdt_get_alias - retrieve the path referenced by a given alias
+ * @fdt: pointer to the device tree blob
+ * @name: name of the alias to look up
+ *
+ * fdt_get_alias() retrieves the value of a given alias. That is, the
+ * value of the property named 'name' in the node /aliases.
+ *
+ * returns:
+ * a pointer to the expansion of the alias named 'name', if it exists
+ * NULL, if the given alias or the /aliases node does not exist
+ */
+const char *fdt_get_alias(const void *fdt, const char *name);
+
+/**
+ * fdt_get_path - determine the full path of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose path to find
+ * @buf: character buffer to contain the returned path (will be overwritten)
+ * @buflen: size of the character buffer at buf
+ *
+ * fdt_get_path() computes the full path of the node at offset
+ * nodeoffset, and records that path in the buffer at buf.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * 0, on success
+ * buf contains the absolute path of the node at
+ * nodeoffset, as a NUL-terminated string.
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOSPACE, the path of the given node is longer than (buflen-1)
+ * characters and will not fit in the given buffer.
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen);
+
+/**
+ * fdt_supernode_atdepth_offset - find a specific ancestor of a node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ * @supernodedepth: depth of the ancestor to find
+ * @nodedepth: pointer to an integer variable (will be overwritten) or NULL
+ *
+ * fdt_supernode_atdepth_offset() finds an ancestor of the given node
+ * at a specific depth from the root (where the root itself has depth
+ * 0, its immediate subnodes depth 1 and so forth). So
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL);
+ * will always return 0, the offset of the root node. If the node at
+ * nodeoffset has depth D, then:
+ * fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL);
+ * will return nodeoffset itself.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * structure block offset of the node at nodeoffset's ancestor
+ * of depth supernodedepth (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of nodeoffset
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
+ int supernodedepth, int *nodedepth);
+
+/**
+ * fdt_node_depth - find the depth of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_node_depth() finds the depth of a given node. The root node
+ * has depth 0, its immediate subnodes depth 1 and so forth.
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset.
+ *
+ * returns:
+ * depth of the node at nodeoffset (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_depth(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_parent_offset - find the parent of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose parent to find
+ *
+ * fdt_parent_offset() locates the parent node of a given node (that
+ * is, it finds the offset of the node which contains the node at
+ * nodeoffset as a subnode).
+ *
+ * NOTE: This function is expensive, as it must scan the device tree
+ * structure from the start to nodeoffset, *twice*.
+ *
+ * returns:
+ * structure block offset of the parent of the node at nodeoffset
+ * (>=0), on success
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_parent_offset(const void *fdt, int nodeoffset);
+
+/**
+ * fdt_node_offset_by_prop_value - find nodes with a given property value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @propname: property name to check
+ * @propval: property value to search for
+ * @proplen: length of the value in propval
+ *
+ * fdt_node_offset_by_prop_value() returns the offset of the first
+ * node after startoffset, which has a property named propname whose
+ * value is of length proplen and has value equal to propval; or if
+ * startoffset is -1, the very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_prop_value(fdt, -1, propname,
+ * propval, proplen);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_prop_value(fdt, offset, propname,
+ * propval, proplen);
+ * }
+ *
+ * Note the -1 in the first call to the function: if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
+ const char *propname,
+ const void *propval, int proplen);
+
+/**
+ * fdt_node_offset_by_phandle - find the node with a given phandle
+ * @fdt: pointer to the device tree blob
+ * @phandle: phandle value
+ *
+ * fdt_node_offset_by_phandle() returns the offset of the node
+ * which has the given phandle value. If there is more than one node
+ * in the tree with the given phandle (an invalid tree), results are
+ * undefined.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0), on success
+ * -FDT_ERR_NOTFOUND, no node with that phandle exists
+ * -FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle);
+
+/**
+ * fdt_node_check_compatible - check a node's compatible property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of a tree node
+ * @compatible: string to match against
+ *
+ * fdt_node_check_compatible() returns 0 if the given node contains a
+ * 'compatible' property with the given string as one of its elements,
+ * it returns non-zero otherwise, or on error.
+ *
+ * returns:
+ * 0, if the node has a 'compatible' property listing the given string
+ * 1, if the node has a 'compatible' property, but it does not list
+ * the given string
+ * -FDT_ERR_NOTFOUND, if the given node has no 'compatible' property
+ * -FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_check_compatible(const void *fdt, int nodeoffset,
+ const char *compatible);
+
+/**
+ * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value
+ * @fdt: pointer to the device tree blob
+ * @startoffset: only find nodes after this offset
+ * @compatible: 'compatible' string to match against
+ *
+ * fdt_node_offset_by_compatible() returns the offset of the first
+ * node after startoffset, which has a 'compatible' property which
+ * lists the given compatible string; or if startoffset is -1, the
+ * very first such node in the tree.
+ *
+ * To iterate through all nodes matching the criterion, the following
+ * idiom can be used:
+ * offset = fdt_node_offset_by_compatible(fdt, -1, compatible);
+ * while (offset != -FDT_ERR_NOTFOUND) {
+ * // other code here
+ * offset = fdt_node_offset_by_compatible(fdt, offset, compatible);
+ * }
+ *
+ * Note the -1 in the first call to the function: if 0 is used here
+ * instead, the function will never locate the root node, even if it
+ * matches the criterion.
+ *
+ * returns:
+ * structure block offset of the located node (>= 0, >startoffset),
+ * on success
+ * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the
+ * tree after startoffset
+ * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE, standard meanings
+ */
+int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
+ const char *compatible);
+
+/**********************************************************************/
+/* Write-in-place functions */
+/**********************************************************************/
+
+/**
+ * fdt_setprop_inplace - change a property's value, but not its size
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to replace the property value with
+ * @len: length of the property value
+ *
+ * fdt_setprop_inplace() replaces the value of a given property with
+ * the data in val, of length len. This function cannot change the
+ * size of a property, and so will only work if len is equal to the
+ * current length of the property.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if len is not equal to the property's current length
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_setprop_inplace_cell - change the value of a single-cell property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: cell (32-bit integer) value to replace the property with
+ *
+ * fdt_setprop_inplace_cell() replaces the value of a given property
+ * with the 32-bit integer cell value in val, converting val to
+ * big-endian if necessary. This function cannot change the size of a
+ * property, and so will only work if the property already exists and
+ * has length 4.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the given property value, and will not alter or move any other part
+ * of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, if the property's length is not equal to 4
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset,
+ const char *name, uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_nop_property - replace a property with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to nop
+ * @name: name of the property to nop
+ *
+ * fdt_nop_property() will replace a given property's representation
+ * in the blob with FDT_NOP tags, effectively removing it from the
+ * tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the property, and will not alter or move any other part of the
+ * tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_property(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_nop_node - replace a node (subtree) with nop tags
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to nop
+ *
+ * fdt_nop_node() will replace a given node's representation in the
+ * blob, including all its subnodes, if any, with FDT_NOP tags,
+ * effectively removing it from the tree.
+ *
+ * This function will alter only the bytes in the blob which contain
+ * the node and its properties and subnodes, and will not alter or
+ * move any other part of the tree.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_nop_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Sequential write functions */
+/**********************************************************************/
+
+int fdt_create(void *buf, int bufsize);
+int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size);
+int fdt_finish_reservemap(void *fdt);
+int fdt_begin_node(void *fdt, const char *name);
+int fdt_property(void *fdt, const char *name, const void *val, int len);
+static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_property(fdt, name, &val, sizeof(val));
+}
+#define fdt_property_string(fdt, name, str) \
+ fdt_property(fdt, name, str, strlen(str)+1)
+int fdt_property_cells_v(void *fdt, const char *name, int count,
+ va_list args);
+int fdt_property_cells(void *fdt, const char *name, int count, ...);
+int fdt_end_node(void *fdt);
+int fdt_finish(void *fdt);
+
+/**********************************************************************/
+/* Read-write functions */
+/**********************************************************************/
+
+int fdt_open_into(const void *fdt, void *buf, int bufsize);
+int fdt_pack(void *fdt);
+
+/**
+ * fdt_add_mem_rsv - add one memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @address, @size: 64-bit values (native endian)
+ *
+ * Adds a reserve map entry to the given blob reserving a region at
+ * address address of length size.
+ *
+ * This function will insert data into the reserve map and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new reservation entry
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size);
+
+/**
+ * fdt_del_mem_rsv - remove a memory reserve map entry
+ * @fdt: pointer to the device tree blob
+ * @n: entry to remove
+ *
+ * fdt_del_mem_rsv() removes the n-th memory reserve map entry from
+ * the blob.
+ *
+ * This function will delete data from the reservation table and will
+ * therefore change the indexes of some entries in the table.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there
+ * are less than n+1 reserve map entries)
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_mem_rsv(void *fdt, int n);
+
+/**
+ * fdt_set_name - change the name of a given node
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: structure block offset of a node
+ * @name: name to give the node
+ *
+ * fdt_set_name() replaces the name (including unit address, if any)
+ * of the given node with the given string. NOTE: this function can't
+ * efficiently check if the new name is unique amongst the given
+ * node's siblings; results are undefined if this function is invoked
+ * with a name equal to one of the given node's siblings.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob
+ * to contain the new name
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE, standard meanings
+ */
+int fdt_set_name(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_setprop - create or change a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: pointer to data to set the property value to
+ * @len: length of the property value
+ *
+ * fdt_setprop() sets the value of the named property in the given
+ * node to the given value and length, creating the property if it
+ * does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len);
+
+/**
+ * fdt_setprop_cell - set a property to a single cell value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @val: 32-bit integer value for the property (native endian)
+ *
+ * fdt_setprop_cell() sets the value of the named property in the
+ * given node to the given cell value (converting to big-endian if
+ * necessary), or creates a new property with that value if it does
+ * not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
+ uint32_t val)
+{
+ val = cpu_to_fdt32(val);
+ return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val));
+}
+
+/**
+ * fdt_setprop_string - set a property to a string value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @str: string value for the property
+ *
+ * fdt_setprop_string() sets the value of the named property in the
+ * given node to the given string value (using the length of the
+ * string to determine the new length of the property), or creates a
+ * new property with that value if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_string(fdt, nodeoffset, name, str) \
+ fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_delprop - delete a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to delete
+ * @name: name of the property to delete
+ *
+ * fdt_delprop() will delete the given property.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOTFOUND, node does not have the named property
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_delprop(void *fdt, int nodeoffset, const char *name);
+
+/**
+ * fdt_add_subnode_namelen - creates a new node based on substring
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ * @namelen: number of characters of name to consider
+ *
+ * Identical to fdt_add_subnode(), but use only the first namelen
+ * characters of name as the name of the new node. This is useful for
+ * creating subnodes based on a portion of a larger string, such as a
+ * full path.
+ */
+int fdt_add_subnode_namelen(void *fdt, int parentoffset,
+ const char *name, int namelen);
+
+/**
+ * fdt_add_subnode - creates a new node
+ * @fdt: pointer to the device tree blob
+ * @parentoffset: structure block offset of a node
+ * @name: name of the subnode to locate
+ *
+ * fdt_add_subnode() creates a new node as a subnode of the node at
+ * structure block offset parentoffset, with the given name (which
+ * should include the unit address, if any).
+ *
+ * This function will insert data into the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * structure block offset of the created subnode (>=0), on success
+ * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE tag
+ * -FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of
+ * the given name
+ * -FDT_ERR_NOSPACE, if there is insufficient free space in the
+ * blob to contain the new node
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings.
+ */
+int fdt_add_subnode(void *fdt, int parentoffset, const char *name);
+
+/**
+ * fdt_del_node - delete a node (subtree)
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node to delete
+ *
+ * fdt_del_node() will remove the given node, including all its
+ * subnodes if any, from the blob.
+ *
+ * This function will delete data from the blob, and will therefore
+ * change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_del_node(void *fdt, int nodeoffset);
+
+/**********************************************************************/
+/* Debugging / informational functions */
+/**********************************************************************/
+
+const char *fdt_strerror(int errval);
+
+#endif /* _LIBFDT_H */
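On the read-only side, the usual pattern is a path or offset lookup followed by fdt_getprop(), remembering that cell values are stored big-endian in the blob. A hedged sketch (node and property names are illustrative, not from this patch):

#include <libfdt.h>

static int read_timebase(const void *fdt, uint32_t *tb)
{
	const uint32_t *prop;
	int offset, len;

	offset = fdt_path_offset(fdt, "/cpus/cpu@0");
	if (offset < 0)
		return offset;		/* negative FDT_ERR_* code */

	prop = fdt_getprop(fdt, offset, "timebase-frequency", &len);
	if (!prop)
		return len;		/* on failure, len holds the error */
	if (len != (int)sizeof(*prop))
		return -FDT_ERR_BADSTRUCTURE;

	*tb = fdt32_to_cpu(*prop);	/* convert from big-endian */
	return 0;
}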
diff --git a/libfdt/libfdt_env.h b/libfdt/libfdt_env.h
new file mode 100644
index 0000000..0d9a131
--- /dev/null
+++ b/libfdt/libfdt_env.h
@@ -0,0 +1,24 @@
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+
+/* _B(n) extracts byte n (most-significant first) of the enclosing
+ * function's argument 'x'; the flat tree stores all values big-endian. */
+#define _B(n) ((unsigned long long)((uint8_t *)&x)[n])
+static inline uint32_t fdt32_to_cpu(uint32_t x)
+{
+ return (_B(0) << 24) | (_B(1) << 16) | (_B(2) << 8) | _B(3);
+}
+#define cpu_to_fdt32(x) fdt32_to_cpu(x)
+
+static inline uint64_t fdt64_to_cpu(uint64_t x)
+{
+ return (_B(0) << 56) | (_B(1) << 48) | (_B(2) << 40) | (_B(3) << 32)
+ | (_B(4) << 24) | (_B(5) << 16) | (_B(6) << 8) | _B(7);
+}
+#define cpu_to_fdt64(x) fdt64_to_cpu(x)
+#undef _B
+
+#endif /* _LIBFDT_ENV_H */
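Because the conversion above assembles the value byte-by-byte from the most-significant end, it converts between big-endian storage and host order on either endianness, and applying it twice is the identity; that is why cpu_to_fdt32()/cpu_to_fdt64() can simply alias the *_to_cpu functions. A small self-check, assuming a hosted build with assert():

#include <assert.h>
#include "libfdt_env.h"

int main(void)
{
	/* round-tripping through fdt (big-endian) order is the identity */
	assert(fdt32_to_cpu(cpu_to_fdt32(0x12345678)) == 0x12345678);
	assert(fdt64_to_cpu(cpu_to_fdt64(0x1122334455667788ULL))
	       == 0x1122334455667788ULL);
	return 0;
}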
diff --git a/libfdt/libfdt_internal.h b/libfdt/libfdt_internal.h
new file mode 100644
index 0000000..d2dcbd6
--- /dev/null
+++ b/libfdt/libfdt_internal.h
@@ -0,0 +1,94 @@
+#ifndef _LIBFDT_INTERNAL_H
+#define _LIBFDT_INTERNAL_H
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2006 David Gibson, IBM Corporation.
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fdt.h>
+
+#define FDT_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define FDT_TAGALIGN(x) (FDT_ALIGN((x), FDT_TAGSIZE))
+
+#define FDT_CHECK_HEADER(fdt) \
+ { \
+ int err; \
+ if ((err = fdt_check_header(fdt)) != 0) \
+ return err; \
+ }
+
+int _fdt_check_node_offset(const void *fdt, int offset);
+const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
+int _fdt_node_end_offset(void *fdt, int nodeoffset);
+
+static inline const void *_fdt_offset_ptr(const void *fdt, int offset)
+{
+ return (const char *)fdt + fdt_off_dt_struct(fdt) + offset;
+}
+
+static inline void *_fdt_offset_ptr_w(void *fdt, int offset)
+{
+ return (void *)(uintptr_t)_fdt_offset_ptr(fdt, offset);
+}
+
+static inline const struct fdt_reserve_entry *_fdt_mem_rsv(const void *fdt, int n)
+{
+ const struct fdt_reserve_entry *rsv_table =
+ (const struct fdt_reserve_entry *)
+ ((const char *)fdt + fdt_off_mem_rsvmap(fdt));
+
+ return rsv_table + n;
+}
+static inline struct fdt_reserve_entry *_fdt_mem_rsv_w(void *fdt, int n)
+{
+ return (void *)(uintptr_t)_fdt_mem_rsv(fdt, n);
+}
+
+#define FDT_SW_MAGIC (~FDT_MAGIC)
+
+#endif /* _LIBFDT_INTERNAL_H */
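FDT_ALIGN() rounds x up to the next multiple of a (a power of two), and FDT_TAGALIGN() specialises it to the tag size from fdt.h; this is the padding applied to node names and property data in the structure block. A small check of the arithmetic (the includes mirror how the .c files in this patch pull the header in):

#include <assert.h>
#include <libfdt.h>
#include "libfdt_internal.h"

int main(void)
{
	assert(FDT_ALIGN(5, 4) == 8);	/* round up to a multiple of 4 */
	assert(FDT_ALIGN(8, 4) == 8);	/* already aligned: unchanged */
	assert(FDT_TAGALIGN(9) == 12);	/* FDT_TAGSIZE is sizeof(uint32_t) */
	return 0;
}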
diff --git a/libfdt/version.lds b/libfdt/version.lds
new file mode 100644
index 0000000..3c3994e
--- /dev/null
+++ b/libfdt/version.lds
@@ -0,0 +1,54 @@
+LIBFDT_1.2 {
+ global:
+ fdt_next_node;
+ fdt_check_header;
+ fdt_move;
+ fdt_string;
+ fdt_num_mem_rsv;
+ fdt_get_mem_rsv;
+ fdt_subnode_offset_namelen;
+ fdt_subnode_offset;
+ fdt_path_offset;
+ fdt_get_name;
+ fdt_get_property_namelen;
+ fdt_get_property;
+ fdt_getprop_namelen;
+ fdt_getprop;
+ fdt_get_phandle;
+ fdt_get_alias_namelen;
+ fdt_get_alias;
+ fdt_get_path;
+ fdt_supernode_atdepth_offset;
+ fdt_node_depth;
+ fdt_parent_offset;
+ fdt_node_offset_by_prop_value;
+ fdt_node_offset_by_phandle;
+ fdt_node_check_compatible;
+ fdt_node_offset_by_compatible;
+ fdt_setprop_inplace;
+ fdt_nop_property;
+ fdt_nop_node;
+ fdt_create;
+ fdt_add_reservemap_entry;
+ fdt_finish_reservemap;
+ fdt_begin_node;
+ fdt_property;
+ fdt_end_node;
+ fdt_finish;
+ fdt_open_into;
+ fdt_pack;
+ fdt_add_mem_rsv;
+ fdt_del_mem_rsv;
+ fdt_set_name;
+ fdt_setprop;
+ fdt_delprop;
+ fdt_add_subnode_namelen;
+ fdt_add_subnode;
+ fdt_del_node;
+ fdt_strerror;
+ fdt_offset_ptr;
+ fdt_next_tag;
+
+ local:
+ *;
+};
diff --git a/libflash/Makefile.inc b/libflash/Makefile.inc
new file mode 100644
index 0000000..35f96f7
--- /dev/null
+++ b/libflash/Makefile.inc
@@ -0,0 +1,7 @@
+LIBFLASH_SRCS = libflash.c libffs.c
+LIBFLASH_OBJS = $(LIBFLASH_SRCS:%.c=%.o)
+
+SUBDIRS += libflash
+LIBFLASH = libflash/built-in.o
+
+$(LIBFLASH): $(LIBFLASH_OBJS:%=libflash/%)
diff --git a/libflash/ffs.h b/libflash/ffs.h
new file mode 100644
index 0000000..2969c4b
--- /dev/null
+++ b/libflash/ffs.h
@@ -0,0 +1,159 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/pnor/ffs.h $ */
+/* */
+/* IBM CONFIDENTIAL */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2013 */
+/* */
+/* p1 */
+/* */
+/* Object Code Only (OCO) source materials */
+/* Licensed Internal Code Source Materials */
+/* IBM HostBoot Licensed Internal Code */
+/* */
+/* The source code for this program is not published or otherwise */
+/* divested of its trade secrets, irrespective of what has been */
+/* deposited with the U.S. Copyright Office. */
+/* */
+/* Origin: 30 */
+/* */
+/* IBM_PROLOG_END_TAG */
+/*
+ * Copyright (c) International Business Machines Corp., 2012
+ *
+ * FSP Flash Structure
+ *
+ * This header defines the layout for the FSP Flash Structure.
+ */
+
+#ifndef __FFS_H__
+#define __FFS_H__
+
+/* Pull in the correct header depending on what is being built */
+#if defined(__KERNEL__)
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* The version of this partition implementation */
+#define FFS_VERSION_1 1
+
+/* Magic number for the partition header (ASCII 'PART') */
+#define FFS_MAGIC 0x50415254
+
+/* The maximum length of the partition name */
+#define PART_NAME_MAX 15
+
+/*
+ * Sizes of the data structures
+ */
+#define FFS_HDR_SIZE sizeof(struct ffs_hdr)
+#define FFS_ENTRY_SIZE sizeof(struct ffs_entry)
+
+/*
+ * Sizes of the data structures w/o checksum
+ */
+#define FFS_HDR_SIZE_CSUM (FFS_HDR_SIZE - sizeof(uint32_t))
+#define FFS_ENTRY_SIZE_CSUM (FFS_ENTRY_SIZE - sizeof(uint32_t))
+
+/* pid of logical partitions/containers */
+#define FFS_PID_TOPLEVEL 0xFFFFFFFF
+
+/*
+ * Type of image contained within a partition
+ */
+enum type {
+ FFS_TYPE_DATA = 1,
+ FFS_TYPE_LOGICAL = 2,
+ FFS_TYPE_PARTITION = 3,
+};
+
+/*
+ * Flag bit definitions
+ */
+#define FFS_FLAGS_PROTECTED 0x0001
+#define FFS_FLAGS_U_BOOT_ENV 0x0002
+
+/*
+ * Number of user data words
+ */
+#define FFS_USER_WORDS 16
+
+/**
+ * struct ffs_entry - Partition entry
+ *
+ * @name:	Opaque null-terminated string
+ * @base: Starting offset of partition in flash (in hdr.block_size)
+ * @size: Partition size (in hdr.block_size)
+ * @pid: Parent partition entry (FFS_PID_TOPLEVEL for toplevel)
+ * @id: Partition entry ID [1..65536]
+ * @type: Describe type of partition
+ * @flags: Partition attributes (optional)
+ * @actual: Actual partition size (in bytes)
+ * @resvd: Reserved words for future use
+ * @user: User data (optional)
+ * @checksum: Partition entry checksum (includes all above)
+ */
+struct ffs_entry {
+ char name[PART_NAME_MAX + 1];
+ uint32_t base;
+ uint32_t size;
+ uint32_t pid;
+ uint32_t id;
+ uint32_t type;
+ uint32_t flags;
+ uint32_t actual;
+ uint32_t resvd[4];
+ struct {
+ uint32_t data[FFS_USER_WORDS];
+ } user;
+ uint32_t checksum;
+} __attribute__ ((packed));
+
+/**
+ * struct ffs_hdr - FSP Flash Structure header
+ *
+ * @magic: Eye catcher/corruption detector
+ * @version: Version of the structure
+ * @size: Size of partition table (in block_size)
+ * @entry_size: Size of struct ffs_entry element (in bytes)
+ * @entry_count: Number of struct ffs_entry elements in @entries array
+ * @block_size: Size of block on device (in bytes)
+ * @block_count: Number of blocks on device
+ * @resvd: Reserved words for future use
+ * @checksum: Header checksum
+ * @entries: Pointer to array of partition entries
+ */
+struct ffs_hdr {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t size;
+ uint32_t entry_size;
+ uint32_t entry_count;
+ uint32_t block_size;
+ uint32_t block_count;
+ uint32_t resvd[4];
+ uint32_t checksum;
+ struct ffs_entry entries[];
+} __attribute__ ((packed));
+
+
+#endif /* __FFS_H__ */
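+
+/*
+ * Illustrative layout arithmetic (made-up numbers): with
+ * hdr.block_size = 0x1000 and an entry where base = 0x100 and
+ * size = 0x40, the partition lives at flash offset
+ * 0x100 * 0x1000 = 0x100000 and spans 0x40 * 0x1000 = 0x40000 bytes;
+ * entry.actual (in bytes) may be smaller than that.
+ */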
diff --git a/libflash/libffs.c b/libflash/libffs.c
new file mode 100644
index 0000000..ef2aa4d
--- /dev/null
+++ b/libflash/libffs.c
@@ -0,0 +1,280 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <ccan/endian/endian.h>
+
+#include "libffs.h"
+
+enum ffs_type {
+ ffs_type_flash,
+ ffs_type_image,
+};
+
+struct ffs_handle {
+ struct ffs_hdr hdr; /* Converted header */
+ enum ffs_type type;
+ struct flash_chip *chip;
+ uint32_t flash_offset;
+ uint32_t max_size;
+ void *cache;
+ uint32_t cached_size;
+};
+
+static uint32_t ffs_checksum(void* data, size_t size)
+{
+ uint32_t i, csum = 0;
+
+ for (i = csum = 0; i < (size/4); i++)
+ csum ^= ((uint32_t *)data)[i];
+ return csum;
+}
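+
+/*
+ * Note the XOR property this relies on: if the trailing checksum word
+ * is set to the XOR of all preceding 32-bit words, the XOR across the
+ * whole structure (checksum included) is 0, which is what
+ * ffs_check_convert_header() and ffs_check_convert_entry() below
+ * verify by testing ffs_checksum(...) == 0.
+ */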
+
+static int ffs_check_convert_header(struct ffs_hdr *dst, struct ffs_hdr *src)
+{
+ dst->magic = be32_to_cpu(src->magic);
+ if (dst->magic != FFS_MAGIC)
+ return FFS_ERR_BAD_MAGIC;
+ dst->version = be32_to_cpu(src->version);
+ if (dst->version != FFS_VERSION_1)
+ return FFS_ERR_BAD_VERSION;
+ if (ffs_checksum(src, FFS_HDR_SIZE) != 0)
+ return FFS_ERR_BAD_CKSUM;
+ dst->size = be32_to_cpu(src->size);
+ dst->entry_size = be32_to_cpu(src->entry_size);
+ dst->entry_count = be32_to_cpu(src->entry_count);
+ dst->block_size = be32_to_cpu(src->block_size);
+ dst->block_count = be32_to_cpu(src->block_count);
+
+ return 0;
+}
+
+int ffs_open_flash(struct flash_chip *chip, uint32_t offset,
+ uint32_t max_size, struct ffs_handle **ffs)
+{
+ struct ffs_hdr hdr;
+ struct ffs_handle *f;
+ uint32_t fl_size, erase_size;
+ int rc;
+
+ if (!ffs)
+ return FLASH_ERR_PARM_ERROR;
+ *ffs = NULL;
+
+ /* Grab some info about our flash chip */
+ rc = flash_get_info(chip, NULL, &fl_size, &erase_size);
+ if (rc) {
+ FL_ERR("FFS: Error %d retrieving flash info\n", rc);
+ return rc;
+ }
+ if ((offset + max_size) < offset)
+ return FLASH_ERR_PARM_ERROR;
+ if ((offset + max_size) > fl_size)
+ return FLASH_ERR_PARM_ERROR;
+
+ /* Read flash header */
+ rc = flash_read(chip, offset, &hdr, sizeof(hdr));
+ if (rc) {
+ FL_ERR("FFS: Error %d reading flash header\n", rc);
+ return rc;
+ }
+
+ /* Allocate ffs_handle structure and start populating */
+ f = malloc(sizeof(*f));
+ if (!f)
+ return FLASH_ERR_MALLOC_FAILED;
+ memset(f, 0, sizeof(*f));
+ f->type = ffs_type_flash;
+ f->flash_offset = offset;
+ f->max_size = max_size ? max_size : (fl_size - offset);
+ f->chip = chip;
+
+ /* Convert and check flash header */
+ rc = ffs_check_convert_header(&f->hdr, &hdr);
+ if (rc) {
+ FL_ERR("FFS: Error %d checking flash header\n", rc);
+ free(f);
+ return rc;
+ }
+
+ /*
+ * Decide how much of the image to grab to get the whole
+ * partition map.
+ */
+ f->cached_size = f->hdr.block_size * f->hdr.size;
+ FL_DBG("FFS: Partition map size: 0x%x\n", f->cached_size);
+
+	/* Round up to a multiple of the erase size */
+	f->cached_size = (f->cached_size + (erase_size - 1)) &
+		~(erase_size - 1);
+ FL_DBG("FFS: Aligned to: 0x%x\n", f->cached_size);
+
+ /* Allocate cache */
+ f->cache = malloc(f->cached_size);
+ if (!f->cache) {
+ free(f);
+ return FLASH_ERR_MALLOC_FAILED;
+ }
+
+ /* Read the cached map */
+ rc = flash_read(chip, offset, f->cache, f->cached_size);
+ if (rc) {
+ FL_ERR("FFS: Error %d reading flash partition map\n", rc);
+		free(f->cache);
+		free(f);
+ }
+ if (rc == 0)
+ *ffs = f;
+ return rc;
+}
+
+#if 0 /* XXX TODO: For FW updates so we can copy nvram around */
+int ffs_open_image(void *image, uint32_t size, uint32_t offset,
+ struct ffs_handle **ffs)
+{
+}
+#endif
+
+void ffs_close(struct ffs_handle *ffs)
+{
+ if (ffs->cache)
+ free(ffs->cache);
+ free(ffs);
+}
+
+static struct ffs_entry *ffs_get_part(struct ffs_handle *ffs, uint32_t index,
+ uint32_t *out_offset)
+{
+ uint32_t esize = ffs->hdr.entry_size;
+ uint32_t offset = FFS_HDR_SIZE + index * esize;
+
+	if (index >= ffs->hdr.entry_count)
+ return NULL;
+ if (out_offset)
+ *out_offset = offset;
+ return (struct ffs_entry *)(ffs->cache + offset);
+}
+
+static int ffs_check_convert_entry(struct ffs_entry *dst, struct ffs_entry *src)
+{
+ if (ffs_checksum(src, FFS_ENTRY_SIZE) != 0)
+ return FFS_ERR_BAD_CKSUM;
+ memcpy(dst->name, src->name, sizeof(dst->name));
+ dst->base = be32_to_cpu(src->base);
+ dst->size = be32_to_cpu(src->size);
+ dst->pid = be32_to_cpu(src->pid);
+ dst->id = be32_to_cpu(src->id);
+ dst->type = be32_to_cpu(src->type);
+ dst->flags = be32_to_cpu(src->flags);
+ dst->actual = be32_to_cpu(src->actual);
+
+ return 0;
+}
+
+int ffs_lookup_part(struct ffs_handle *ffs, const char *name,
+ uint32_t *part_idx)
+{
+ struct ffs_entry ent;
+ uint32_t i;
+ int rc;
+
+ /* Lookup the requested partition */
+ for (i = 0; i < ffs->hdr.entry_count; i++) {
+ struct ffs_entry *src_ent = ffs_get_part(ffs, i, NULL);
+ rc = ffs_check_convert_entry(&ent, src_ent);
+ if (rc) {
+ FL_ERR("FFS: Bad entry %d in partition map\n", i);
+ continue;
+ }
+ if (!strncmp(name, ent.name, sizeof(ent.name)))
+ break;
+ }
+ if (i >= ffs->hdr.entry_count)
+ return FFS_ERR_PART_NOT_FOUND;
+ if (part_idx)
+ *part_idx = i;
+ return 0;
+}
+
+int ffs_part_info(struct ffs_handle *ffs, uint32_t part_idx,
+ char **name, uint32_t *start,
+ uint32_t *total_size, uint32_t *act_size)
+{
+ struct ffs_entry *raw_ent;
+ struct ffs_entry ent;
+ char *n;
+ int rc;
+
+ if (part_idx >= ffs->hdr.entry_count)
+ return FFS_ERR_PART_NOT_FOUND;
+
+ raw_ent = ffs_get_part(ffs, part_idx, NULL);
+ if (!raw_ent)
+ return FFS_ERR_PART_NOT_FOUND;
+
+ rc = ffs_check_convert_entry(&ent, raw_ent);
+ if (rc) {
+ FL_ERR("FFS: Bad entry %d in partition map\n", part_idx);
+ return rc;
+ }
+ if (start)
+ *start = ent.base * ffs->hdr.block_size;
+ if (total_size)
+ *total_size = ent.size * ffs->hdr.block_size;
+ if (act_size)
+ *act_size = ent.actual;
+ if (name) {
+		n = malloc(PART_NAME_MAX + 1);
+		if (!n)
+			return FLASH_ERR_MALLOC_FAILED;
+		memset(n, 0, PART_NAME_MAX + 1);
+ strncpy(n, ent.name, PART_NAME_MAX);
+ *name = n;
+ }
+ return 0;
+}
+
+int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx,
+ uint32_t act_size)
+{
+ struct ffs_entry *ent;
+ uint32_t offset;
+
+ if (part_idx >= ffs->hdr.entry_count) {
+		FL_DBG("FFS: Entry out of bounds\n");
+ return FFS_ERR_PART_NOT_FOUND;
+ }
+
+ ent = ffs_get_part(ffs, part_idx, &offset);
+ if (!ent) {
+ FL_DBG("FFS: Entry not found\n");
+ return FFS_ERR_PART_NOT_FOUND;
+ }
+ FL_DBG("FFS: part index %d at offset 0x%08x\n",
+ part_idx, offset);
+
+ if (ent->actual == cpu_to_be32(act_size)) {
+		FL_DBG("FFS: ent->actual already matches: 0x%08x==0x%08x\n",
+ cpu_to_be32(act_size), ent->actual);
+ return 0;
+ }
+ ent->actual = cpu_to_be32(act_size);
+ ent->checksum = ffs_checksum(ent, FFS_ENTRY_SIZE_CSUM);
+ if (!ffs->chip)
+ return 0;
+ return flash_smart_write(ffs->chip, offset, ent, FFS_ENTRY_SIZE);
+}
diff --git a/libflash/libffs.h b/libflash/libffs.h
new file mode 100644
index 0000000..5a3ff40
--- /dev/null
+++ b/libflash/libffs.h
@@ -0,0 +1,56 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __LIBFFS_H
+#define __LIBFFS_H
+
+#include <libflash/libflash.h>
+#include <libflash/ffs.h>
+
+/* FFS handle, opaque */
+struct ffs_handle;
+
+/* Error codes:
+ *
+ * < 0 = flash controller errors
+ * 0 = success
+ * > 0 = libffs / libflash errors
+ */
+#define FFS_ERR_BAD_MAGIC 100
+#define FFS_ERR_BAD_VERSION 101
+#define FFS_ERR_BAD_CKSUM 102
+#define FFS_ERR_PART_NOT_FOUND 103
+
+int ffs_open_flash(struct flash_chip *chip, uint32_t offset,
+ uint32_t max_size, struct ffs_handle **ffs);
+
+/* TODO
+int ffs_open_image(void *image, uint32_t size, struct ffs_handle **ffs);
+*/
+
+void ffs_close(struct ffs_handle *ffs);
+
+int ffs_lookup_part(struct ffs_handle *ffs, const char *name,
+ uint32_t *part_idx);
+
+int ffs_part_info(struct ffs_handle *ffs, uint32_t part_idx,
+ char **name, uint32_t *start,
+ uint32_t *total_size, uint32_t *act_size);
+
+int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx,
+ uint32_t act_size);
+
+
+#endif /* __LIBFFS_H */
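+
+/*
+ * Minimal usage sketch (error handling abbreviated; assumes "chip" was
+ * previously obtained from flash_init() and an "NVRAM" partition
+ * exists in the map):
+ *
+ *	struct ffs_handle *ffs;
+ *	uint32_t idx, start, size, act;
+ *
+ *	if (ffs_open_flash(chip, 0, 0, &ffs) == 0) {
+ *		if (ffs_lookup_part(ffs, "NVRAM", &idx) == 0 &&
+ *		    ffs_part_info(ffs, idx, NULL, &start,
+ *				  &size, &act) == 0)
+ *			printf("NVRAM at 0x%x, size 0x%x\n", start, size);
+ *		ffs_close(ffs);
+ *	}
+ */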
diff --git a/libflash/libflash-priv.h b/libflash/libflash-priv.h
new file mode 100644
index 0000000..44fa513
--- /dev/null
+++ b/libflash/libflash-priv.h
@@ -0,0 +1,213 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __LIBFLASH_PRIV_H
+#define __LIBFLASH_PRIV_H
+
+#include <ccan/endian/endian.h>
+#include <ccan/array_size/array_size.h>
+#include <ccan/container_of/container_of.h>
+
+/* Flash commands */
+#define CMD_PP 0x02
+#define CMD_READ 0x03
+#define CMD_WRDI 0x04
+#define CMD_RDSR 0x05
+#define CMD_WREN 0x06
+#define CMD_SE 0x20
+#define CMD_RDSCUR 0x2b
+#define CMD_BE32K 0x52
+#define CMD_CE 0x60
+#define CMD_RDID 0x9f
+#define CMD_EN4B 0xb7
+#define CMD_BE 0xd8
+#define CMD_RDDPB 0xe0
+#define CMD_RDSPB 0xe2
+#define CMD_EX4B 0xe9
+
+/* Flash status bits */
+#define STAT_WIP 0x01
+#define STAT_WEN 0x02
+
+/* This isn't exposed to clients but is to controllers */
+struct flash_info {
+ uint32_t id;
+ uint32_t size;
+ uint32_t flags;
+#define FL_ERASE_4K 0x00000001 /* Supports 4k erase */
+#define FL_ERASE_32K 0x00000002 /* Supports 32k erase */
+#define FL_ERASE_64K 0x00000004 /* Supports 64k erase */
+#define FL_ERASE_CHIP	0x00000008	/* Supports chip erase */
+#define FL_ERASE_ALL (FL_ERASE_4K | FL_ERASE_32K | FL_ERASE_64K | \
+ FL_ERASE_CHIP)
+#define FL_CAN_4B 0x00000010 /* Supports 4b mode */
+ const char *name;
+};
+
+/* Flash controller, return negative values for errors */
+struct spi_flash_ctrl {
+ /*
+	 * The controller can provide basically two interfaces:
+	 * a fairly high level one and a lower level one.
+ *
+ * If all functions of the high level interface are
+ * implemented then the low level one is optional. A
+ * controller can implement some of the high level one
+ * in which case the missing ones will be handled by
+ * libflash using the low level interface.
+ *
+ * There are also some common functions.
+ */
+
+ /* **************************************************
+ * Misc / common functions
+ * **************************************************/
+
+ /*
+ * - setup(ctrl, info, tsize)
+ *
+ * Provides the controller with an option to configure itself
+ * based on the specific flash type. It can also override some
+ * settings in the info block such as available erase sizes etc...
+ * which can be needed for high level controllers. It can also
+ * override the total flash size.
+ */
+ int (*setup)(struct spi_flash_ctrl *ctrl, struct flash_info *info,
+ uint32_t *tsize);
+
+ /*
+ * - set_4b(ctrl, enable)
+ *
+	 * enable : Switch to 4-byte (true) or 3-byte (false) address mode
+ *
+ * Set the controller's address size. If the controller doesn't
+ * implement the low level command interface, then this must also
+ * configure the flash chip itself. Otherwise, libflash will do it.
+ *
+ * Note that if this isn't implemented, then libflash might still
+ * try to switch large flash chips to 4b mode if the low level cmd
+ * interface is implemented. It will then also stop using the high
+ * level command interface since it's assumed that it cannot handle
+ * 4b addresses.
+ */
+ int (*set_4b)(struct spi_flash_ctrl *ctrl, bool enable);
+
+
+
+ /* **************************************************
+ * High level interface
+ * **************************************************/
+
+ /*
+	 * Read chip ID. This can return up to 16 bytes though the
+	 * current libflash will only use 3 (room for things like
+	 * extended Micron stuff).
+	 *
+	 * On entry, id_size is set to the buffer size; it needs to
+	 * be adjusted to the actual ID size read.
+ *
+ * If NULL, libflash will use cmd_rd to send normal RDID (0x9f)
+ * command.
+ */
+ int (*chip_id)(struct spi_flash_ctrl *ctrl, uint8_t *id_buf,
+ uint32_t *id_size);
+
+ /*
+ * Read from flash. There is no specific constraint on
+ * alignment or size other than not reading outside of
+ * the chip.
+ *
+ * If NULL, libflash will use cmd_rd to send normal
+ * READ (0x03) commands.
+ */
+ int (*read)(struct spi_flash_ctrl *ctrl, uint32_t addr, void *buf,
+ uint32_t size);
+
+ /*
+ * Write to flash. There is no specific constraint on
+	 * alignment or size other than not writing outside of
+	 * the chip. The driver is responsible for handling
+	 * 256-byte page alignment and for sending the write
+	 * enable commands when needed.
+ *
+ * If absent, libflash will use cmd_wr to send WREN (0x06)
+ * and PP (0x02) commands.
+ *
+ * Note: This does not need to handle erasing. libflash
+ * will ensure that this is never used for changing a bit
+ * value from 0 to 1.
+ */
+ int (*write)(struct spi_flash_ctrl *ctrl, uint32_t addr,
+ const void *buf, uint32_t size);
+
+ /*
+ * Erase. This will be called for erasing a portion of
+ * the flash using a granularity (alignment of start and
+ * size) that is no less than the smallest supported
+ * erase size in the info block (*). The driver is
+	 * responsible for sending write enable commands when needed.
+ *
+ * If absent, libflash will use cmd_wr to send WREN (0x06)
+ * and either of SE (0x20), BE32K (0x52) or BE (0xd8)
+ * based on what the flash chip supports.
+ *
+ * (*) Note: This is called with addr=0 and size=0xffffffff
+ * in which case this is used as a "chip erase". Return
+ * FLASH_ERR_CHIP_ER_NOT_SUPPORTED if not supported. Some
+ * future version of libflash might then emulate it using
+ * normal erase commands.
+ */
+ int (*erase)(struct spi_flash_ctrl *ctrl, uint32_t addr,
+ uint32_t size);
+
+ /* **************************************************
+ * Low level interface
+ * **************************************************/
+
+	/* Note: For commands with no data, libflash might use
+ * either cmd_rd or cmd_wr.
+ */
+
+ /*
+ * - cmd_rd(ctrl, cmd, has_addr, address, buffer, size);
+ *
+ * cmd : command opcode
+ * has_addr : send an address after the command
+ * address : address to send
+ * buffer : buffer for additional data to read (or NULL)
+	 * size     : size of additional data to read (or 0)
+	 *
+	 * Sends a command and optionally reads additional data
+ */
+ int (*cmd_rd)(struct spi_flash_ctrl *ctrl, uint8_t cmd,
+ bool has_addr, uint32_t addr, void *buffer,
+ uint32_t size);
+ /*
+ * - cmd_wr(ctrl, cmd, has_addr, address, buffer, size);
+ *
+ * cmd : command opcode
+ * has_addr : send an address after the command
+ * address : address to send
+ * buffer : buffer for additional data to write (or NULL)
+	 * size     : size of additional data to write (or 0)
+	 *
+	 * Sends a command and optionally writes additional data
+ */
+ int (*cmd_wr)(struct spi_flash_ctrl *ctrl, uint8_t cmd,
+ bool has_addr, uint32_t addr, const void *buffer,
+ uint32_t size);
+};
+
+#endif /* __LIBFLASH_PRIV_H */
diff --git a/libflash/libflash.c b/libflash/libflash.c
new file mode 100644
index 0000000..a3e6ff2
--- /dev/null
+++ b/libflash/libflash.c
@@ -0,0 +1,636 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "libflash.h"
+#include "libflash-priv.h"
+
+static const struct flash_info flash_info[] = {
+ { 0xc22019, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "MXxxL25635F"},
+ { 0xc2201a, 0x04000000, FL_ERASE_ALL | FL_CAN_4B, "MXxxL51235F"},
+ { 0xef4018, 0x01000000, FL_ERASE_ALL, "W25Q128BV" },
+ { 0x55aa55, 0x00100000, FL_ERASE_ALL | FL_CAN_4B, "TEST_FLASH"},
+};
+
+struct flash_chip {
+ struct spi_flash_ctrl *ctrl; /* Controller */
+ struct flash_info info; /* Flash info */
+ uint32_t tsize; /* Corrected flash size */
+ uint32_t min_erase_mask; /* Minimum erase size */
+ bool mode_4b; /* Flash currently in 4b mode */
+ struct flash_req *cur_req; /* Current request */
+ void *smart_buf; /* Buffer for smart writes */
+};
+
+static int fl_read_stat(struct flash_chip *c, uint8_t *stat)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+
+ return ct->cmd_rd(ct, CMD_RDSR, false, 0, stat, 1);
+}
+
+static int fl_wren(struct flash_chip *c)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ uint8_t stat;
+ int i, rc;
+
+ /* Some flashes need it to be hammered */
+ for (i = 0; i < 10; i++) {
+ rc = ct->cmd_wr(ct, CMD_WREN, false, 0, NULL, 0);
+ if (rc) return rc;
+ rc = fl_read_stat(c, &stat);
+ if (rc) return rc;
+ if (stat & STAT_WEN)
+ return 0;
+ }
+ return FLASH_ERR_WREN_TIMEOUT;
+}
+
+/* Synchronous write completion, probably need a yield hook */
+static int fl_sync_wait_idle(struct flash_chip *c)
+{
+ int rc;
+ uint8_t stat;
+
+ /* XXX Add timeout */
+ for (;;) {
+ rc = fl_read_stat(c, &stat);
+ if (rc) return rc;
+ if (!(stat & STAT_WIP))
+ return 0;
+ }
+ /* return FLASH_ERR_WIP_TIMEOUT; */
+}
+
+int flash_read(struct flash_chip *c, uint32_t pos, void *buf, uint32_t len)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+
+ /* XXX Add sanity/bound checking */
+
+ /*
+ * If the controller supports read and either we are in 3b mode
+ * or we are in 4b *and* the controller supports it, then do a
+ * high level read.
+ */
+ if ((!c->mode_4b || ct->set_4b) && ct->read)
+ return ct->read(ct, pos, buf, len);
+
+ /* Otherwise, go manual if supported */
+ if (!ct->cmd_rd)
+ return FLASH_ERR_CTRL_CMD_UNSUPPORTED;
+ return ct->cmd_rd(ct, CMD_READ, true, pos, buf, len);
+}
+
+static void fl_get_best_erase(struct flash_chip *c, uint32_t dst, uint32_t size,
+ uint32_t *chunk, uint8_t *cmd)
+{
+ /* Smaller than 32k, use 4k */
+ if ((dst & 0x7fff) || (size < 0x8000)) {
+ *chunk = 0x1000;
+ *cmd = CMD_SE;
+ return;
+ }
+ /* Smaller than 64k and 32k is supported, use it */
+ if ((c->info.flags & FL_ERASE_32K) &&
+ ((dst & 0xffff) || (size < 0x10000))) {
+ *chunk = 0x8000;
+ *cmd = CMD_BE32K;
+ return;
+ }
+ /* If 64K is not supported, use whatever smaller size is */
+ if (!(c->info.flags & FL_ERASE_64K)) {
+ if (c->info.flags & FL_ERASE_32K) {
+ *chunk = 0x8000;
+ *cmd = CMD_BE32K;
+ } else {
+ *chunk = 0x1000;
+ *cmd = CMD_SE;
+ }
+ return;
+ }
+	/* All right, let's go for 64K */
+ *chunk = 0x10000;
+ *cmd = CMD_BE;
+}
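+
+/*
+ * Illustrative walk-through (assuming FL_ERASE_ALL): erasing
+ * dst = 0x8000, size = 0x18000 proceeds as one 32k erase at 0x8000
+ * (start is 32k- but not 64k-aligned) followed by one 64k erase at
+ * 0x10000, rather than 24 individual 4k erases.
+ */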
+
+int flash_erase(struct flash_chip *c, uint32_t dst, uint32_t size)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ uint32_t chunk;
+ uint8_t cmd;
+ int rc;
+
+ /* Some sanity checking */
+ if (((dst + size) <= dst) || !size || (dst + size) > c->tsize)
+ return FLASH_ERR_PARM_ERROR;
+
+ /* Check boundaries fit erase blocks */
+ if ((dst | size) & c->min_erase_mask)
+ return FLASH_ERR_ERASE_BOUNDARY;
+
+	FL_DBG("LIBFLASH: Erasing 0x%08x..0x%08x...\n", dst, dst + size);
+
+ /* Use controller erase if supported */
+ if (ct->erase)
+ return ct->erase(ct, dst, size);
+
+	/* All right, loop as long as there's something to erase */
+ while(size) {
+		/* How big can we make it based on alignment & size */
+ fl_get_best_erase(c, dst, size, &chunk, &cmd);
+
+ /* Poke write enable */
+ rc = fl_wren(c);
+ if (rc)
+ return rc;
+
+ /* Send erase command */
+ rc = ct->cmd_wr(ct, cmd, true, dst, NULL, 0);
+ if (rc)
+ return rc;
+
+ /* Wait for write complete */
+ rc = fl_sync_wait_idle(c);
+ if (rc)
+ return rc;
+
+ size -= chunk;
+ dst += chunk;
+ }
+ return 0;
+}
+
+int flash_erase_chip(struct flash_chip *c)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ int rc;
+
+ /* XXX TODO: Fallback to using normal erases */
+ if (!(c->info.flags & FL_ERASE_CHIP))
+ return FLASH_ERR_CHIP_ER_NOT_SUPPORTED;
+
+ FL_DBG("LIBFLASH: Erasing chip...\n");
+
+ /* Use controller erase if supported */
+ if (ct->erase)
+ return ct->erase(ct, 0, 0xffffffff);
+
+ rc = fl_wren(c);
+ if (rc) return rc;
+
+ rc = ct->cmd_wr(ct, CMD_CE, false, 0, NULL, 0);
+ if (rc)
+ return rc;
+
+ /* Wait for write complete */
+ return fl_sync_wait_idle(c);
+}
+
+static int fl_wpage(struct flash_chip *c, uint32_t dst, const void *src,
+ uint32_t size)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ int rc;
+
+ if (size < 1 || size > 0x100)
+ return FLASH_ERR_BAD_PAGE_SIZE;
+
+ rc = fl_wren(c);
+ if (rc) return rc;
+
+ rc = ct->cmd_wr(ct, CMD_PP, true, dst, src, size);
+ if (rc)
+ return rc;
+
+ /* Wait for write complete */
+ return fl_sync_wait_idle(c);
+}
+
+int flash_write(struct flash_chip *c, uint32_t dst, const void *src,
+ uint32_t size, bool verify)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ uint32_t todo = size;
+ uint32_t d = dst;
+ const void *s = src;
+ uint8_t vbuf[0x100];
+ int rc;
+
+ /* Some sanity checking */
+ if (((dst + size) <= dst) || !size || (dst + size) > c->tsize)
+ return FLASH_ERR_PARM_ERROR;
+
+	FL_DBG("LIBFLASH: Writing to 0x%08x..0x%08x...\n", dst, dst + size);
+
+ /*
+ * If the controller supports write and either we are in 3b mode
+ * or we are in 4b *and* the controller supports it, then do a
+ * high level write.
+ */
+ if ((!c->mode_4b || ct->set_4b) && ct->write) {
+ rc = ct->write(ct, dst, src, size);
+ if (rc)
+ return rc;
+ goto writing_done;
+ }
+
+ /* Otherwise, go manual if supported */
+ if (!ct->cmd_wr)
+ return FLASH_ERR_CTRL_CMD_UNSUPPORTED;
+
+ /* Iterate for each page to write */
+ while(todo) {
+ uint32_t chunk;
+
+		/* Handle misaligned start: write at most up to the
+		 * end of the current 256-byte page (1..0x100 bytes)
+		 */
+		chunk = 0x100 - (d & 0xff);
+ if (chunk > todo)
+ chunk = todo;
+
+ rc = fl_wpage(c, d, s, chunk);
+ if (rc) return rc;
+ d += chunk;
+ s += chunk;
+ todo -= chunk;
+ }
+
+ writing_done:
+ if (!verify)
+ return 0;
+
+ /* Verify */
+ FL_DBG("LIBFLASH: Verifying...\n");
+
+ while(size) {
+ uint32_t chunk;
+
+ chunk = sizeof(vbuf);
+ if (chunk > size)
+ chunk = size;
+ rc = flash_read(c, dst, vbuf, chunk);
+ if (rc) return rc;
+ if (memcmp(vbuf, src, chunk)) {
+ FL_ERR("LIBFLASH: Miscompare at 0x%08x\n", dst);
+ return FLASH_ERR_VERIFY_FAILURE;
+ }
+ dst += chunk;
+ src += chunk;
+ size -= chunk;
+ }
+ return 0;
+}
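+
+/*
+ * Illustrative page chunking (made-up numbers): writing 0x200 bytes at
+ * dst = 0x10c0 issues three page programs: 0x40 bytes at 0x10c0 (up to
+ * the 0x1100 page boundary), 0x100 bytes at 0x1100, and the remaining
+ * 0xc0 bytes at 0x1200.
+ */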
+
+enum sm_comp_res {
+ sm_no_change,
+ sm_need_write,
+ sm_need_erase,
+};
+
+static enum sm_comp_res flash_smart_comp(struct flash_chip *c,
+ const void *src,
+ uint32_t offset, uint32_t size)
+{
+ uint8_t *b = c->smart_buf + offset;
+ const uint8_t *s = src;
+ bool is_same = true;
+ uint32_t i;
+
+ /* SRC DEST NEED_ERASE
+ * 0 1 0
+ * 1 1 0
+ * 0 0 0
+ * 1 0 1
+ */
+ for (i = 0; i < size; i++) {
+ /* Any bit need to be set, need erase */
+ if (s[i] & ~b[i])
+ return sm_need_erase;
+ if (is_same && (b[i] != s[i]))
+ is_same = false;
+ }
+ return is_same ? sm_no_change : sm_need_write;
+}
+
+int flash_smart_write(struct flash_chip *c, uint32_t dst, const void *src,
+ uint32_t size)
+{
+ uint32_t er_size = c->min_erase_mask + 1;
+ uint32_t end = dst + size;
+ int rc;
+
+ /* Some sanity checking */
+ if (end <= dst || !size || end > c->tsize) {
+ FL_DBG("LIBFLASH: Smart write param error\n");
+ return FLASH_ERR_PARM_ERROR;
+ }
+
+	FL_DBG("LIBFLASH: Smart writing to 0x%08x..0x%08x...\n",
+ dst, dst + size);
+
+ /* As long as we have something to write ... */
+ while(dst < end) {
+ uint32_t page, off, chunk;
+ enum sm_comp_res sr;
+
+ /* Figure out which erase page we are in and read it */
+ page = dst & ~c->min_erase_mask;
+ off = dst & c->min_erase_mask;
+ FL_DBG("LIBFLASH: reading page 0x%08x..0x%08x...",
+ page, page + er_size);
+ rc = flash_read(c, page, c->smart_buf, er_size);
+ if (rc) {
+ FL_DBG(" error %d!\n", rc);
+ return rc;
+ }
+
+ /* Locate the chunk of data we are working on */
+ chunk = er_size - off;
+ if (size < chunk)
+ chunk = size;
+
+		/* Compare the chunk against the current flash content */
+ sr = flash_smart_comp(c, src, off, chunk);
+ switch(sr) {
+ case sm_no_change:
+ /* Identical, skip it */
+ FL_DBG(" same !\n");
+ break;
+ case sm_need_write:
+ /* Just needs writing over */
+ FL_DBG(" need write !\n");
+ rc = flash_write(c, dst, src, chunk, true);
+ if (rc) {
+ FL_DBG("LIBFLASH: Write error %d !\n", rc);
+ return rc;
+ }
+ break;
+ case sm_need_erase:
+ FL_DBG(" need erase !\n");
+ rc = flash_erase(c, page, er_size);
+ if (rc) {
+ FL_DBG("LIBFLASH: erase error %d !\n", rc);
+ return rc;
+ }
+ /* Then update the portion of the buffer and write the block */
+ memcpy(c->smart_buf + off, src, chunk);
+ rc = flash_write(c, page, c->smart_buf, er_size, true);
+ if (rc) {
+ FL_DBG("LIBFLASH: write error %d !\n", rc);
+ return rc;
+ }
+ break;
+ }
+ dst += chunk;
+ src += chunk;
+ size -= chunk;
+ }
+ return 0;
+}
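+
+/*
+ * For example, flash_smart_write(c, 0x20, buf, 4) with a 4k minimum
+ * erase size reads the whole 4k block containing offset 0x20, then:
+ * does nothing if the 4 bytes already match, programs just those bytes
+ * if only 1->0 bit transitions are needed, or erases and rewrites the
+ * full block otherwise.
+ */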
+
+
+static int flash_identify(struct flash_chip *c)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ const struct flash_info *info = NULL;
+ uint32_t iid, id_size;
+#define MAX_ID_SIZE 16
+ uint8_t id[MAX_ID_SIZE];
+ int rc, i;
+
+ if (ct->chip_id) {
+ /* High level controller interface */
+ id_size = MAX_ID_SIZE;
+ rc = ct->chip_id(ct, id, &id_size);
+ if (rc)
+ return rc;
+ } else {
+ /* Fallback to get ID manually */
+ rc = ct->cmd_rd(ct, CMD_RDID, false, 0, id, 3);
+ if (rc)
+ return rc;
+ id_size = 3;
+ }
+ if (id_size < 3)
+ return FLASH_ERR_CHIP_UNKNOWN;
+
+ /* Convert to a dword for lookup */
+ iid = id[0];
+ iid = (iid << 8) | id[1];
+ iid = (iid << 8) | id[2];
+
+ FL_DBG("LIBFLASH: Flash ID: %02x.%02x.%02x (%06x)\n",
+ id[0], id[1], id[2], iid);
+
+ /* Lookup in flash_info */
+ for (i = 0; i < ARRAY_SIZE(flash_info); i++) {
+ info = &flash_info[i];
+ if (info->id == iid)
+ break;
+ }
+	if (!info || info->id != iid)
+ return FLASH_ERR_CHIP_UNKNOWN;
+
+ c->info = *info;
+ c->tsize = info->size;
+
+ /*
+ * Let controller know about our settings and possibly
+ * override them
+ */
+ if (ct->setup) {
+ rc = ct->setup(ct, &c->info, &c->tsize);
+ if (rc)
+ return rc;
+ }
+
+ /* Calculate min erase granularity */
+ if (c->info.flags & FL_ERASE_4K)
+ c->min_erase_mask = 0xfff;
+ else if (c->info.flags & FL_ERASE_32K)
+ c->min_erase_mask = 0x7fff;
+ else if (c->info.flags & FL_ERASE_64K)
+ c->min_erase_mask = 0xffff;
+ else {
+ /* No erase size ? oops ... */
+ FL_ERR("LIBFLASH: No erase sizes !\n");
+ return FLASH_ERR_CTRL_CONFIG_MISMATCH;
+ }
+
+ FL_DBG("LIBFLASH: Found chip %s size %dM erase granule: %dK\n",
+ c->info.name, c->tsize >> 20, (c->min_erase_mask + 1) >> 10);
+
+ return 0;
+}
+
+static int flash_set_4b(struct flash_chip *c, bool enable)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+
+ return ct->cmd_wr(ct, enable ? CMD_EN4B : CMD_EX4B, false, 0, NULL, 0);
+}
+
+int flash_force_4b_mode(struct flash_chip *c, bool enable_4b)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+	int rc = 0;
+
+ /*
+ * We only allow force 4b if both controller and flash do 4b
+ * as this is mainly used if a 3rd party tries to directly
+ * access a direct mapped read region
+ */
+ if (enable_4b && !((c->info.flags & FL_CAN_4B) && ct->set_4b))
+ return FLASH_ERR_4B_NOT_SUPPORTED;
+
+ /* Only send to flash directly on controllers that implement
+ * the low level callbacks
+ */
+ if (ct->cmd_wr) {
+ rc = flash_set_4b(c, enable_4b);
+ if (rc)
+ return rc;
+ }
+
+ /* Then inform the controller */
+ if (ct->set_4b)
+ rc = ct->set_4b(ct, enable_4b);
+ return rc;
+}
+
+static int flash_configure(struct flash_chip *c)
+{
+ struct spi_flash_ctrl *ct = c->ctrl;
+ int rc;
+
+ /* Crop flash size if necessary */
+ if (c->tsize > 0x01000000 && !(c->info.flags & FL_CAN_4B)) {
+ FL_ERR("LIBFLASH: Flash chip cropped to 16M, no 4b mode\n");
+ c->tsize = 0x01000000;
+ }
+
+ /* If flash chip > 16M, enable 4b mode */
+ if (c->tsize > 0x01000000) {
+ FL_DBG("LIBFLASH: Flash >16MB, enabling 4B mode...\n");
+
+ /* Set flash to 4b mode if we can */
+ if (ct->cmd_wr) {
+ rc = flash_set_4b(c, true);
+ if (rc) {
+ FL_ERR("LIBFLASH: Failed to set flash 4b mode\n");
+ return rc;
+ }
+ }
+
+
+ /* Set controller to 4b mode if supported */
+ if (ct->set_4b) {
+ FL_DBG("LIBFLASH: Enabling controller 4B mode...\n");
+ rc = ct->set_4b(ct, true);
+ if (rc) {
+ FL_ERR("LIBFLASH: Failed"
+ " to set controller 4b mode\n");
+ return rc;
+ }
+ }
+ } else {
+ FL_DBG("LIBFLASH: Flash <=16MB, disabling 4B mode...\n");
+
+ /*
+ * If flash chip supports 4b mode, make sure we disable
+ * it in case it was left over by the previous user
+ */
+ if (c->info.flags & FL_CAN_4B) {
+ rc = flash_set_4b(c, false);
+ if (rc) {
+ FL_ERR("LIBFLASH: Failed to"
+ " clear flash 4b mode\n");
+ return rc;
+ }
+ }
+
+ /* Set controller to 3b mode if mode switch is supported */
+ if (ct->set_4b) {
+ FL_DBG("LIBFLASH: Disabling controller 4B mode...\n");
+ rc = ct->set_4b(ct, false);
+ if (rc) {
+ FL_ERR("LIBFLASH: Failed to"
+ " clear controller 4b mode\n");
+ return rc;
+ }
+ }
+ }
+ return 0;
+}
+
+int flash_get_info(struct flash_chip *chip, const char **name,
+ uint32_t *total_size, uint32_t *erase_granule)
+{
+ if (name)
+ *name = chip->info.name;
+ if (total_size)
+ *total_size = chip->tsize;
+ if (erase_granule)
+ *erase_granule = chip->min_erase_mask + 1;
+ return 0;
+}
+
+int flash_init(struct spi_flash_ctrl *ctrl, struct flash_chip **flash)
+{
+ struct flash_chip *c;
+ int rc;
+
+ *flash = NULL;
+ c = malloc(sizeof(struct flash_chip));
+ if (!c)
+ return FLASH_ERR_MALLOC_FAILED;
+ memset(c, 0, sizeof(*c));
+ c->ctrl = ctrl;
+
+ rc = flash_identify(c);
+ if (rc) {
+ FL_ERR("LIBFLASH: Flash identification failed\n");
+ goto bail;
+ }
+ c->smart_buf = malloc(c->min_erase_mask + 1);
+ if (!c->smart_buf) {
+ FL_ERR("LIBFLASH: Failed to allocate smart buffer !\n");
+ rc = FLASH_ERR_MALLOC_FAILED;
+ goto bail;
+ }
+ rc = flash_configure(c);
+ if (rc)
+ FL_ERR("LIBFLASH: Flash configuration failed\n");
+ bail:
+ if (rc) {
+ free(c);
+ return rc;
+ }
+ *flash = c;
+ return 0;
+}
+
+void flash_exit(struct flash_chip *chip)
+{
+ /* XXX Make sure we are idle etc... */
+ free(chip);
+}
+
diff --git a/libflash/libflash.h b/libflash/libflash.h
new file mode 100644
index 0000000..e8d357b
--- /dev/null
+++ b/libflash/libflash.h
@@ -0,0 +1,83 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __LIBFLASH_H
+#define __LIBFLASH_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifndef FL_INF
+#define FL_INF(fmt...) do { printf(fmt); } while(0)
+#endif
+
+#ifndef FL_DBG
+//#define FL_DBG(fmt...) do { printf(fmt); } while(0)
+#define FL_DBG(fmt...) do { } while(0)
+#endif
+
+#ifndef FL_ERR
+#define FL_ERR(fmt...) do { printf(fmt); } while(0)
+#endif
+
+/* API status/return:
+ *
+ * <0 = flash controller errors passed through,
+ * 0 = success
+ * >0 = libflash error
+ */
+
+#define FLASH_ERR_MALLOC_FAILED 1
+#define FLASH_ERR_CHIP_UNKNOWN 2
+#define FLASH_ERR_PARM_ERROR 3
+#define FLASH_ERR_ERASE_BOUNDARY 4
+#define FLASH_ERR_WREN_TIMEOUT 5
+#define FLASH_ERR_WIP_TIMEOUT 6
+#define FLASH_ERR_BAD_PAGE_SIZE 7
+#define FLASH_ERR_VERIFY_FAILURE 8
+#define FLASH_ERR_4B_NOT_SUPPORTED 9
+#define FLASH_ERR_CTRL_CONFIG_MISMATCH 10
+#define FLASH_ERR_CHIP_ER_NOT_SUPPORTED 11
+#define FLASH_ERR_CTRL_CMD_UNSUPPORTED 12
+#define FLASH_ERR_CTRL_TIMEOUT 13
+
+/* Flash chip, opaque */
+struct flash_chip;
+struct spi_flash_ctrl;
+
+int flash_init(struct spi_flash_ctrl *ctrl, struct flash_chip **flash);
+void flash_exit(struct flash_chip *chip);
+
+int flash_get_info(struct flash_chip *chip, const char **name,
+ uint32_t *total_size, uint32_t *erase_granule);
+
+/* libflash sets the 4b mode automatically based on the flash
+ * size and controller capabilities but it can be overridden
+ */
+int flash_force_4b_mode(struct flash_chip *chip, bool enable_4b);
+
+int flash_read(struct flash_chip *c, uint32_t pos, void *buf, uint32_t len);
+int flash_erase(struct flash_chip *c, uint32_t dst, uint32_t size);
+int flash_write(struct flash_chip *c, uint32_t dst, const void *src,
+ uint32_t size, bool verify);
+int flash_smart_write(struct flash_chip *c, uint32_t dst, const void *src,
+ uint32_t size);
+
+/* Chip erase may not be supported by all chips/controllers; be
+ * prepared to handle FLASH_ERR_CHIP_ER_NOT_SUPPORTED
+ */
+int flash_erase_chip(struct flash_chip *c);
+
+#endif /* __LIBFLASH_H */
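+
+/*
+ * Minimal usage sketch (error handling abbreviated; "my_ctrl" is a
+ * controller implementation supplied by the platform):
+ *
+ *	struct flash_chip *fl;
+ *	uint8_t buf[256];
+ *
+ *	if (flash_init(&my_ctrl, &fl) == 0) {
+ *		flash_read(fl, 0, buf, sizeof(buf));
+ *		flash_smart_write(fl, 0x1000, buf, sizeof(buf));
+ *		flash_exit(fl);
+ *	}
+ */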
diff --git a/libflash/test/Makefile.check b/libflash/test/Makefile.check
new file mode 100644
index 0000000..f9f1ca8
--- /dev/null
+++ b/libflash/test/Makefile.check
@@ -0,0 +1,20 @@
+# -*-Makefile-*-
+LIBFLASH_TEST := libflash/test/test-flash
+
+check: $(LIBFLASH_TEST:%=%-check)
+
+$(LIBFLASH_TEST:%=%-check) : %-check: %
+ $(VALGRIND) $<
+
+libflash/test/stubs.o: libflash/test/stubs.c
+ $(HOSTCC) $(HOSTCFLAGS) -g -c -o $@ $<
+
+$(LIBFLASH_TEST) : libflash/test/stubs.o libflash/libflash.c
+
+$(LIBFLASH_TEST) : % : %.c
+ $(HOSTCC) $(HOSTCFLAGS) -O0 -g -I include -I . -o $@ $< libflash/test/stubs.o
+
+clean: libflash-test-clean
+
+libflash-test-clean:
+ $(RM) libflash/test/*.o $(LIBFLASH_TEST)
diff --git a/libflash/test/stubs.c b/libflash/test/stubs.c
new file mode 100644
index 0000000..aabf018
--- /dev/null
+++ b/libflash/test/stubs.c
@@ -0,0 +1,16 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Stubs for libflash test */
diff --git a/libflash/test/test-flash.c b/libflash/test/test-flash.c
new file mode 100644
index 0000000..5f48797
--- /dev/null
+++ b/libflash/test/test-flash.c
@@ -0,0 +1,418 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <libflash/libflash.h>
+#include <libflash/libflash-priv.h>
+
+#include "../libflash.c"
+
+#define __unused __attribute__((unused))
+
+#define ERR(fmt...) fprintf(stderr, fmt)
+
+/* Flash commands */
+#define CMD_PP 0x02
+#define CMD_READ 0x03
+#define CMD_WRDI 0x04
+#define CMD_RDSR 0x05
+#define CMD_WREN 0x06
+#define CMD_SE 0x20
+#define CMD_RDSCUR 0x2b
+#define CMD_BE32K 0x52
+#define CMD_CE 0x60
+#define CMD_RDID 0x9f
+#define CMD_EN4B 0xb7
+#define CMD_BE 0xd8
+#define CMD_RDDPB 0xe0
+#define CMD_RDSPB 0xe2
+#define CMD_EX4B 0xe9
+
+/* Flash status bits */
+#define STAT_WIP 0x01
+#define STAT_WEN 0x02
+
+static uint8_t *sim_image;
+static uint32_t sim_image_sz = 0x100000;
+static uint32_t sim_index;
+static uint32_t sim_addr;
+static uint32_t sim_er_size;
+static uint8_t sim_sr;
+static bool sim_fl_4b;
+static bool sim_ct_4b;
+
+static enum sim_state {
+ sim_state_idle,
+ sim_state_rdid,
+ sim_state_rdsr,
+ sim_state_read_addr,
+ sim_state_read_data,
+ sim_state_write_addr,
+ sim_state_write_data,
+ sim_state_erase_addr,
+ sim_state_erase_done,
+} sim_state;
+
+/*
+ * Simulated flash & controller
+ */
+static int sim_start_cmd(uint8_t cmd)
+{
+ if (sim_state != sim_state_idle) {
+ ERR("SIM: Command %02x in wrong state %d\n", cmd, sim_state);
+ return -1;
+ }
+
+ sim_index = 0;
+ sim_addr = 0;
+
+ switch(cmd) {
+ case CMD_RDID:
+ sim_state = sim_state_rdid;
+ break;
+ case CMD_RDSR:
+ sim_state = sim_state_rdsr;
+ break;
+ case CMD_EX4B:
+ sim_fl_4b = false;
+ break;
+ case CMD_EN4B:
+ sim_fl_4b = true;
+ break;
+ case CMD_WREN:
+ sim_sr |= STAT_WEN;
+ break;
+ case CMD_READ:
+ sim_state = sim_state_read_addr;
+ if (sim_ct_4b != sim_fl_4b)
+ ERR("SIM: 4b mode mismatch in READ !\n");
+ break;
+ case CMD_PP:
+ sim_state = sim_state_write_addr;
+ if (sim_ct_4b != sim_fl_4b)
+ ERR("SIM: 4b mode mismatch in PP !\n");
+ if (!(sim_sr & STAT_WEN))
+ ERR("SIM: PP without WEN, ignoring... \n");
+ break;
+ case CMD_SE:
+ case CMD_BE32K:
+ case CMD_BE:
+ if (sim_ct_4b != sim_fl_4b)
+ ERR("SIM: 4b mode mismatch in SE/BE !\n");
+ if (!(sim_sr & STAT_WEN))
+ ERR("SIM: SE/BE without WEN, ignoring... \n");
+ sim_state = sim_state_erase_addr;
+ switch(cmd) {
+ case CMD_SE: sim_er_size = 0x1000; break;
+ case CMD_BE32K: sim_er_size = 0x8000; break;
+ case CMD_BE: sim_er_size = 0x10000; break;
+ }
+ break;
+ case CMD_CE:
+ if (!(sim_sr & STAT_WEN)) {
+ ERR("SIM: CE without WEN, ignoring... \n");
+ break;
+ }
+ memset(sim_image, 0xff, sim_image_sz);
+ sim_sr |= STAT_WIP;
+ sim_sr &= ~STAT_WEN;
+ break;
+ default:
+ ERR("SIM: Unsupported command %02x\n", cmd);
+ return -1;
+ }
+ return 0;
+}
+
+static void sim_end_cmd(void)
+{
+ /* For write and sector/block erase, set WIP & clear WEN here */
+ if (sim_state == sim_state_write_data) {
+ sim_sr |= STAT_WIP;
+ sim_sr &= ~STAT_WEN;
+ }
+ sim_state = sim_state_idle;
+}
+
+static bool sim_do_address(const uint8_t **buf, uint32_t *len)
+{
+ uint8_t asize = sim_fl_4b ? 4 : 3;
+ const uint8_t *p = *buf;
+
+ while(*len) {
+ sim_addr = (sim_addr << 8) | *(p++);
+ *buf = p;
+ *len = *len - 1;
+ sim_index++;
+ if (sim_index >= asize)
+ return true;
+ }
+ return false;
+}
+
+static int sim_wbytes(const void *buf, uint32_t len)
+{
+ const uint8_t *b = buf;
+ bool addr_complete;
+
+ again:
+ switch(sim_state) {
+ case sim_state_read_addr:
+ addr_complete = sim_do_address(&b, &len);
+ if (addr_complete) {
+ sim_state = sim_state_read_data;
+ sim_index = 0;
+ if (len)
+ goto again;
+ }
+ break;
+ case sim_state_write_addr:
+ addr_complete = sim_do_address(&b, &len);
+ if (addr_complete) {
+ sim_state = sim_state_write_data;
+ sim_index = 0;
+ if (len)
+ goto again;
+ }
+ break;
+ case sim_state_write_data:
+ if (!(sim_sr & STAT_WEN))
+ break;
+ while(len--) {
+ uint8_t c = *(b++);
+ if (sim_addr >= sim_image_sz) {
+ ERR("SIM: Write past end of flash\n");
+ return -1;
+ }
+ /* Flash write only clears bits */
+ sim_image[sim_addr] &= c;
+ sim_addr = (sim_addr & 0xffffff00) |
+ ((sim_addr + 1) & 0xff);
+ }
+ break;
+ case sim_state_erase_addr:
+ if (!(sim_sr & STAT_WEN))
+ break;
+ addr_complete = sim_do_address(&b, &len);
+ if (addr_complete) {
+ memset(sim_image + sim_addr, 0xff, sim_er_size);
+ sim_sr |= STAT_WIP;
+ sim_sr &= ~STAT_WEN;
+ sim_state = sim_state_erase_done;
+ }
+ break;
+ default:
+ ERR("SIM: Write in wrong state %d\n", sim_state);
+ return -1;
+ }
+ return 0;
+}
+
+static int sim_rbytes(void *buf, uint32_t len)
+{
+ uint8_t *b = buf;
+
+ switch(sim_state) {
+ case sim_state_rdid:
+ while(len--) {
+ switch(sim_index) {
+ case 0:
+ *(b++) = 0x55;
+ break;
+ case 1:
+ *(b++) = 0xaa;
+ break;
+ case 2:
+ *(b++) = 0x55;
+ break;
+ default:
+ ERR("SIM: RDID index %d\n", sim_index);
+ *(b++) = 0;
+ break;
+ }
+ sim_index++;
+ }
+ break;
+ case sim_state_rdsr:
+ while(len--) {
+ *(b++) = sim_sr;
+ if (sim_index > 0)
+ ERR("SIM: RDSR index %d\n", sim_index);
+ sim_index++;
+
+			/* If WIP was 1, clear it, i.e., simulate write/erase
+ * completion
+ */
+ sim_sr &= ~STAT_WIP;
+ }
+ break;
+ case sim_state_read_data:
+ while(len--) {
+ if (sim_addr >= sim_image_sz) {
+ ERR("SIM: Read past end of flash\n");
+ return -1;
+ }
+ *(b++) = sim_image[sim_addr++];
+ }
+ break;
+ default:
+ ERR("SIM: Read in wrong state %d\n", sim_state);
+ return -1;
+ }
+ return 0;
+}
+
+static int sim_send_addr(uint32_t addr)
+{
+ const void *ap;
+
+ /* Layout address MSB first in memory */
+ addr = cpu_to_be32(addr);
+
+ /* Send the right amount of bytes */
+ ap = (char *)&addr;
+
+ if (sim_ct_4b)
+ return sim_wbytes(ap, 4);
+ else
+ return sim_wbytes(ap + 1, 3);
+}
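+
+/*
+ * E.g. addr = 0x00123456 is sent as the three bytes 12 34 56 in
+ * 3-byte mode, or as 00 12 34 56 when the controller is in 4-byte
+ * mode, matching what sim_do_address() accumulates on the other side.
+ */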
+
+static int sim_cmd_rd(struct spi_flash_ctrl *ctrl __unused, uint8_t cmd,
+ bool has_addr, uint32_t addr, void *buffer,
+ uint32_t size)
+{
+ int rc;
+
+ rc = sim_start_cmd(cmd);
+ if (rc)
+ goto bail;
+ if (has_addr) {
+ rc = sim_send_addr(addr);
+ if (rc)
+ goto bail;
+ }
+ if (buffer && size)
+ rc = sim_rbytes(buffer, size);
+ bail:
+ sim_end_cmd();
+ return rc;
+}
+
+static int sim_cmd_wr(struct spi_flash_ctrl *ctrl __unused, uint8_t cmd,
+ bool has_addr, uint32_t addr, const void *buffer,
+ uint32_t size)
+{
+ int rc;
+
+ rc = sim_start_cmd(cmd);
+ if (rc)
+ goto bail;
+ if (has_addr) {
+ rc = sim_send_addr(addr);
+ if (rc)
+ goto bail;
+ }
+ if (buffer && size)
+ rc = sim_wbytes(buffer, size);
+ bail:
+ sim_end_cmd();
+ return rc;
+}
+
+static int sim_set_4b(struct spi_flash_ctrl *ctrl __unused, bool enable)
+{
+ sim_ct_4b = enable;
+
+ return 0;
+}
+
+static int sim_read(struct spi_flash_ctrl *ctrl __unused, uint32_t pos,
+ void *buf, uint32_t len)
+{
+ if (sim_ct_4b != sim_fl_4b)
+ ERR("SIM: 4b mode mismatch in autoread !\n");
+ if ((pos + len) < pos)
+ return -1;
+ if ((pos + len) > sim_image_sz)
+ return -1;
+ memcpy(buf, sim_image + pos, len);
+ return 0;
+};
+
+struct spi_flash_ctrl sim_ctrl = {
+ .cmd_wr = sim_cmd_wr,
+ .cmd_rd = sim_cmd_rd,
+ .set_4b = sim_set_4b,
+ .read = sim_read,
+};
+
+int main(void)
+{
+ struct flash_chip *fl;
+ uint32_t total_size, erase_granule;
+ const char *name;
+ uint16_t *test;
+ int i, rc;
+
+	sim_image = malloc(sim_image_sz);
+	test = malloc(0x10000 * 2);
+	if (!sim_image || !test) {
+		ERR("malloc failed\n");
+		exit(1);
+	}
+	memset(sim_image, 0xff, sim_image_sz);
+
+ rc = flash_init(&sim_ctrl, &fl);
+ if (rc) {
+ ERR("flash_init failed with err %d\n", rc);
+ exit(1);
+ }
+ rc = flash_get_info(fl, &name, &total_size, &erase_granule);
+ if (rc) {
+ ERR("flash_get_info failed with err %d\n", rc);
+ exit(1);
+ }
+
+ /* Make up a test pattern */
+	for (i = 0; i < 0x10000; i++)
+ test[i] = cpu_to_be16(i);
+
+ /* Write 64k of stuff at 0 and at 128k */
+ printf("Writing test patterns...\n");
+ flash_smart_write(fl, 0, test, 0x10000);
+ flash_smart_write(fl, 0x20000, test, 0x10000);
+
+ /* Write "Hello world" straddling the 64k boundary */
+#define HW "Hello World"
+ printf("Writing test string...\n");
+ flash_smart_write(fl, 0xfffc, HW, sizeof(HW));
+
+ /* Check result */
+ if (memcmp(sim_image + 0xfffc, HW, sizeof(HW))) {
+ ERR("Test string mismatch !\n");
+ exit(1);
+ }
+ printf("Test string pass\n");
+ if (memcmp(sim_image, test, 0xfffc)) {
+ ERR("Test pattern mismatch !\n");
+ exit(1);
+ }
+ printf("Test pattern pass\n");
+ flash_exit(fl);
+
+ return 0;
+}
+
diff --git a/libpore/Makefile.inc b/libpore/Makefile.inc
new file mode 100644
index 0000000..02b5837
--- /dev/null
+++ b/libpore/Makefile.inc
@@ -0,0 +1,13 @@
+ifeq ($(PORE),1)
+LIBPORE_SRCS = p8_pore_table_gen_api_fixed.C
+LIBPORE_SRCS += p8_pore_table_static_data.c sbe_xip_image.c pore_inline_assembler.c
+LIBPORE_OBJS_1 = $(LIBPORE_SRCS:%.c=%.o)
+LIBPORE_OBJS = $(LIBPORE_OBJS_1:%.C=%.o)
+endif
+SUBDIRS += libpore
+LIBPORE = libpore/built-in.o
+
+CFLAGS_SKIP_libpore/pore_inline_assembler.o=-Wsuggest-attribute=const
+
+$(LIBPORE): $(LIBPORE_OBJS:%=libpore/%)
+
diff --git a/libpore/fapi_sbe_common.H b/libpore/fapi_sbe_common.H
new file mode 100644
index 0000000..fa8cc71
--- /dev/null
+++ b/libpore/fapi_sbe_common.H
@@ -0,0 +1,69 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/include/fapi_sbe_common.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __FAPI_SBE_COMMON_H
+#define __FAPI_SBE_COMMON_H
+
+// $Id: fapi_sbe_common.H,v 1.1 2012/04/16 23:55:37 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/fapi_sbe_common.H,v $
+//------------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2011
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//------------------------------------------------------------------------------
+// *! OWNER NAME : Email:
+
+/// \file fapi_sbe_common.H
+/// \brief Definitions common to FAPI and SBE procedures
+///
+/// Several preprocessor macros are required to have different definitions in
+/// C, C++ and SBE assembly procedures. These common forms are collected here.
+
+#if defined __ASSEMBLER__
+
+#define CONST_UINT8_T(name, expr) .set name, (expr)
+#define CONST_UINT32_T(name, expr) .set name, (expr)
+#define CONST_UINT64_T(name, expr) .set name, (expr)
+
+#define ULL(x) x
+
+#elif defined __cplusplus
+
+#include <stdint.h>
+
+#define CONST_UINT8_T(name, expr) const uint8_t name = (expr);
+#define CONST_UINT32_T(name, expr) const uint32_t name = (expr);
+#define CONST_UINT64_T(name, expr) const uint64_t name = (expr);
+
+#define ULL(x) x##ull
+
+#else // C code
+
+// CONST_UINT[8,32,64]_T() can't be used in C code/headers; use
+//
+// #define <symbol> <value>   [or ULL(<value>) for 64-bit constants]
+
+#define ULL(x) x##ull
+
+#endif // __ASSEMBLER__
+
+#endif // __FAPI_SBE_COMMON_H
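+
+// Example (illustrative name): a constant shared between C++ and SBE
+// assembly sources can be declared once as
+//
+//   CONST_UINT32_T(SOME_SCOM_ADDR, ULL(0x00030002))
+//
+// which expands to a ".set" in assembly and to a typed const in C++.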
diff --git a/libpore/p8_delta_scan_rw.h b/libpore/p8_delta_scan_rw.h
new file mode 100644
index 0000000..5637e7c
--- /dev/null
+++ b/libpore/p8_delta_scan_rw.h
@@ -0,0 +1,466 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_delta_scan_rw.h $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: p8_delta_scan_rw.h,v 1.49 2014/05/13 13:31:51 jmcgill Exp $
+#define OVERRIDE_OFFSET 8 // Byte offset of forward pointer's addr relative
+ // to base forward pointer's addr.
+#define SIZE_IMAGE_BUF_MAX 5000000 // Max ~5MB image buffer size.
+#define SIZE_IMAGE_CENTAUR_MAX 5000000 // Max ~5MB image buffer size.
+#define SIZE_REPR_RING_MAX 50000 // Max ~50kB repr ring buffer size.
+#define SCOM_REG_MASK 0x00ffffff // Scom register mask (within a chiplet)
+#define CID_MASK 0xff000000 // Chiplet ID mask
+#define CID_EX_LOW 0x10 // Lowest EX chiplet addr
+#define CID_EX_HIGH 0x1f // Highest EX chiplet addr
+
+/***** Xip customize support ****/
+#define COMBINED_GOOD_VECTORS_TOC_NAME "combined_good_vectors"
+#define L2_SINGLE_MEMBER_ENABLE_TOC_NAME "l2_single_member_enable_mask"
+#define PROC_PIB_REPR_VECTOR_TOC_NAME "proc_sbe_pibmem_repair_vector"
+#define NEST_SKEWADJUST_VECTOR_TOC_NAME "proc_sbe_nest_skewadjust_vector"
+#define SECURITY_SETUP_VECTOR_TOC_NAME "proc_sbe_security_setup_vector"
+#define VALID_BOOT_CORES_MASK_TOC_NAME "valid_boot_cores_mask"
+#define MAX_PLL_RING_SIZE 128 // Bytes
+#define PERV_BNDY_PLL_RING_TOC_NAME "perv_bndy_pll_ring"
+#define PERV_BNDY_PLL_RING_ALT_TOC_NAME "perv_bndy_pll_ring_alt"
+#define MAX_FUNC_L3_RING_LIST_ENTRIES 64
+#define MAX_FUNC_L3_RING_SIZE 7000 // Bytes
+#define FUNC_L3_RING_TOC_NAME "ex_func_l3_ring"
+#define MAX_CEN_PLL_RING_SIZE 80 // Bytes
+#define TP_PLL_BNDY_RING_ALT_TOC_NAME "tp_pll_bndy_ring_alt"
+#define STANDALONE_MBOX0_VALUE_TOC_NAME "standalone_mbox0_value"
+#define STANDALONE_MBOX1_VALUE_TOC_NAME "standalone_mbox1_value"
+#define STANDALONE_MBOX2_VALUE_TOC_NAME "standalone_mbox2_value"
+#define STANDALONE_MBOX3_VALUE_TOC_NAME "standalone_mbox3_value"
+#define UNTRUSTED_BAR_TOC_NAME "fabric_config"
+#define UNTRUSTED_PBA_BAR_TOC_NAME "fabric_config_pba"
+#define REFCLOCK_TERM_TOC_NAME "refclock_term"
+
+/***** Scan setting *****/
+#define OPCG_SCAN_RATIO 4
+#define P8_OPCG_SCAN_RATIO_BITS (uint64_t(OPCG_SCAN_RATIO-1)<<(63-8))
+#define P8_OPCG_GO_BITS (uint64_t(0x40000000)<<32)
+#define P8_SCAN_POLL_MASK_BIT15 (uint64_t(0x00010000)<<32)
+
+/***** Scan Control Regs *****/
+#define P8_PORE_OPCG_CTRL_REG0_0x00030002 0x00030002 // OPCG control reg 0
+#define P8_PORE_OPCG_CTRL_REG1_0x00030003 0x00030003 // OPCG control reg 1
+#define P8_PORE_OPCG_CTRL_REG2_0x00030004 0x00030004 // OPCG control reg 2
+#define P8_PORE_OPCG_START_REG3_0x00030005 0x00030005 // OPCG start reg 3
+#define P8_PORE_CLOCK_REGION_0x00030006 0x00030006 // Clock region control
+#define P8_PORE_CLOCK_CONTROLLER_REG 0x00030007 // Addr of clock ctrl scom reg
+#define P8_PORE_CLOCK_STATUS_0x00030008 0x00030008 // Status of clocks running
+#define P8_PORE_SHIFT_REG 0x00038000 // Addr of scom reg that does scan ring shifting
+#define P8_SCAN_CHECK_WORD 0xA5A55A5A // Header check word
+
+/***** Ring state *****/
+#define MAX_RING_SIZE 500000 // 500kbits is the max ring size in bits
+
+/***** Return codes *****/
+#define DSLWB_RING_SEARCH_MATCH 0
+#define DSLWB_RING_SEARCH_EXHAUST_MATCH 30
+#define DSLWB_RING_SEARCH_NO_MATCH 31
+#define DSLWB_RING_SEARCH_MESS 32
+#define DSLWB_SLWB_SUCCESS 0
+#define DSLWB_SLWB_NO_RING_MATCH 40
+#define DSLWB_SLWB_DX_ERROR 41
+#define DSLWB_SLWB_WF_ERROR 42
+#define DSLWB_SLWB_WF_IMAGE_ERROR 43
+#define DSLWB_SLWB_IMAGE_ERROR 44
+#define DSLWB_SLWB_UNKNOWN_ERROR 45
+#define IMGBUILD_SUCCESS 0 // Successful img build.
+#define IMGBUILD_ERR_GENERIC 1 // Non-specific error code.
+#define IMGBUILD_ERR_FILE_ACCESS 2 // Unable to access/open file.
+#define IMGBUILD_ERR_CHIPLET_ID_MESS  4   // Chiplet ID mess (mostly for VPD rings).
+#define IMGBUILD_NO_RINGS_FOUND 5 // Successful img build but no rings found.
+#define IMGBUILD_BAD_ARGS 6 // Bad function arguments.
+#define IMGBUILD_ERR_MEMORY 7 // Memory allocation error.
+#define IMGBUILD_ERR_RING_TOO_LARGE 8 // Ring size exceeds HB/PHYP's buffer.
+#define IMGBUILD_ERR_CHECK_CODE 9 // Coding or image data problem.
+#define IMGBUILD_INVALID_IMAGE 10 // Invalid image.
+#define IMGBUILD_IMAGE_SIZE_MISMATCH 11 // Mismatch between image sizes.
+#define IMGBUILD_IMAGE_SIZE_MESS 12 // Messed up image or section sizes.
+#define IMGBUILD_RINGTYPE_NOT_ALLOWED 13 // Ringtype not allowed.
+#define IMGBUILD_BUFFER_TOO_SMALL 14 // Buffer too small.
+#define IMGBUILD_ERR_PORE_INLINE 20 // Pore inline error.
+#define IMGBUILD_ERR_PORE_INLINE_ASM 21 // Err assoc w/inline assembler.
+#define IMGBUILD_RING_SEARCH_MATCH 0
+#define IMGBUILD_RING_SEARCH_EXHAUST_MATCH 30
+#define IMGBUILD_RING_SEARCH_NO_MATCH 31
+#define IMGBUILD_RING_SEARCH_MESS 32
+#define IMGBUILD_ERR_RING_SEARCH 33 // Err assoc w/ring retrieval.
+#define IMGBUILD_ERR_DATACARE_RING_MESS 34 // Err assoc w/datacare & vpd ring sizes.
+#define IMGBUILD_ERR_WF_CREATE 45 // Err assoc w/create_wiggle_flip_prg.
+#define IMGBUILD_ERR_RING_WRITE_TO_IMAGE 46 // Err assoc w/wr_ring_block_to_img.
+#define IMGBUILD_ERR_SECTION_SIZING 48 // Err assoc w/section sizing.
+#define IMGBUILD_ERR_GET_SECTION 49 // Err assoc w/getting section ID.
+#define IMGBUILD_ERR_SECTION_DELETE 50 // Err assoc w/deleting ELF section.
+#define IMGBUILD_ERR_APPEND 51 // Err assoc w/appending to ELF section.
+#define IMGBUILD_ERR_INCOMPLETE_IMG_BUILD 52 // The image was built, but with errors.
+#define IMGBUILD_ERR_FWD_BACK_PTR_MESS 53 // Forward or backward pointer mess.
+#define IMGBUILD_ERR_KEYWORD_NOT_FOUND 54 // Image keyword not found.
+#define IMGBUILD_ERR_MISALIGNED_RING_LAYOUT 55 // Ring layout is misaligned.
+#define IMGBUILD_ERR_IMAGE_TOO_LARGE 56 // Image too large. Exceeded max size.
+#define IMGBUILD_ERR_XIP_MISC 57 // Miscellaneous XIP image error.
+#define IMGBUILD_ERR_XIP_UNKNOWN 58 // Unknown XIP image error.
+#define IMGBUILD_ERR_RS4_DECOMPRESS 59 // Error during RS4 decompression.
+#define IMGBUILD_ERR_RS4_COMPRESS 60 // Error during RS4 compression.
+#define IMGBUILD_ERR_RAM_HDRS_NOT_SYNCED 61 // Ram headers not synchronized.
+#define IMGBUILD_ERR_RAM_TABLE_FULL 63 // Ram table is full.
+#define IMGBUILD_ERR_RAM_CODE 64 // Code error in Ram API code.
+#define IMGBUILD_ERR_RAM_INVALID_PARM 65 // Invalid Ramming parameter.
+#define IMGBUILD_WARN_RAM_TABLE_CONTAMINATION 66 // Ram table contamination
+#define IMGBUILD_ERR_RAM_TABLE_FAIL 67 // Unsuccessful RAM table build.
+#define IMGBUILD_ERR_RAM_TABLE_END_NOT_FOUND 68 // Table entry end bit not found.
+#define IMGBUILD_ERR_SCOM_INVALID_PARM 70 // Invalid Scomming parameter.
+#define IMGBUILD_ERR_SCOM_HDRS_NOT_SYNCD 72 // Scom headers out of sync.
+#define IMGBUILD_ERR_SCOM_ENTRY_NOT_FOUND 74 // Scom entry not found (OR/AND oper.)
+#define IMGBUILD_ERR_SCOM_REPEAT_ENTRIES 76 // Repeat entries not allowed.
+#define IMGBUILD_ERR_SCOM_INVALID_SUBSECTION 77 // Invalid subsection value.
+#define IMGBUILD_ERR_SCOM_TABLE_FAIL 79 // Unsuccessful SCOM table build.
+
+#if defined SLW_COMMAND_LINE_RAM || defined XIPC_COMMAND_LINE
+#define SLW_COMMAND_LINE
+#endif
+
+#if defined __FAPI && !(defined __P8_PORE_TABLE_GEN_API_C)
+#define MY_INF(_fmt_, _args_...) FAPI_INF(_fmt_, ##_args_)
+#ifndef SLW_COMMAND_LINE
+#define MY_ERR(_fmt_, _args_...) FAPI_ERR(_fmt_, ##_args_)
+#else
+#define MY_ERR(_fmt_, _args_...) FAPI_INF(_fmt_, ##_args_)
+#endif // End of SLW_COMMAND_LINE
+#define MY_DBG(_fmt_, _args_...) FAPI_DBG(_fmt_, ##_args_)
+#else // End of __FAPI
+#ifdef SLW_COMMAND_LINE
+#define MY_INF(_fmt_, _args_...) printf(_fmt_, ##_args_)
+#define MY_ERR(_fmt_, _args_...) printf(_fmt_, ##_args_)
+#define MY_DBG(_fmt_, _args_...) printf(_fmt_, ##_args_)
+#else // End of SLW_COMMAND_LINE
+#ifdef __SKIBOOT__
+#include <skiboot.h>
+//#define MY_INF(_fmt_, _args_...) printf(_fmt_, ##_args_)
+#define MY_INF(_fmt_, _args_...) do { } while(0)
+#define MY_ERR(_fmt_, _args_...) prerror(_fmt_, ##_args_)
+#define MY_DBG(_fmt_, _args_...) do { } while(0)
+#else
+#define MY_INF(_fmt_, _args_...) do { } while(0)
+#define MY_ERR(_fmt_, _args_...) do { } while(0)
+#define MY_DBG(_fmt_, _args_...) do { } while(0)
+#endif
+#endif // End of not(__FAPI) & not(SLW_COMMAND_LINE)
+#endif
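+
+// Net effect of the above (informational summary): with __FAPI the FAPI_*
+// tracers are used; with SLW_COMMAND_LINE all three macros print via
+// printf(); under __SKIBOOT__ only MY_ERR is live (routed to prerror());
+// in all other environments the macros compile to no-ops.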
+
+#ifdef SLW_COMMAND_LINE
+// Debug and development stuff
+//#define IGNORE_FOR_NOW // Causes code sections to be ignored.
+#define DEBUG_SUPPORT // Activates sbe-xip debug support.
+#endif
+
+/* XXX TEMPORARY */
+#ifdef __SKIBOOT__
+#define DEBUG_SUPPORT // Activates sbe-xip debug support.
+#endif
+
+//#include <stdio.h>
+//#include <stdint.h>
+//#include <stdlib.h>
+#include <p8_pore_api_custom.h>
+#include <string.h>
+
+#if defined SLW_COMMAND_LINE
+#include <stdint.h> // May be in conflict with p8_pore_api_custom.h
+#include <stdlib.h> // May be in conflict with p8_pore_api_custom.h
+#include <stdio.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#endif //End of SLW_COMMAND_LINE
+
+// Not needed by:
+// - Done: p8_pore_table_gen_api, p8_slw_build, p8_xip_customize, sbe_xip_tool,
+// p8_delta_scan, p8_ipl_build, p8_centaur_build.
+// - So, what was this used for?
+//#include <pore_bitmanip.H>
+
+#include <p8_image_help_base.H>
+
+#if !(defined __P8_PORE_TABLE_GEN_API_C) && !(defined __CEN_XIP_CUSTOMIZE_C) && !(defined SLW_COMMAND_LINE_RAM)
+// We don't need this include for gen_cpureg/scom or slw ramming.
+#include <p8_scan_compression.H>
+#endif
+
+#undef __PORE_INLINE_ASSEMBLER_C__
+#include <pore_inline.h>
+
+#if( defined(__cplusplus) && !defined(PLIC_MODULE) )
+extern "C" {
+#endif
+
+
+#if !(defined __P8_PORE_TABLE_GEN_API_C) && !(defined SLW_COMMAND_LINE_RAM)
+
+// Info:
+// DeltaRingLayout describes the sequential order of the content in the compressed delta
+// ring blocks in the .initf section in the SBE-XIP images.
+// When creating the .initf delta ring blocks, the following rules must be followed:
+// - Everything must be stored in BE format.
+// - {entryOffset; sizeOfThis; sizeOfMeta; metaData} must be word-aligned to ensure
+// that the {rs4Launch} starts on a word boundary.
+// - {rs4Launch} must start on a word boundary (see the earlier rule for how to do that).
+// - {entryOffset; sizeOfThis; sizeOfMeta; metaData; rs4Launch} must be double-word-
+// aligned to ensure that {rs4Delta} starts on a double-word boundary.
+// - {rs4Delta} must start on a double-word boundary (see the earlier rule for how to do that).
+//
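+// Example (illustrative only): a 13-byte metaData would be padded out to
+// myByteAlign(4,13) = 16 bytes so that the subsequent {rs4Launch} starts on
+// a word boundary.
+//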
+typedef struct {
+ uint64_t entryOffset;
+ uint64_t backItemPtr;
+ uint32_t sizeOfThis;
+ uint32_t sizeOfMeta; // Exact size of meta data. Arbitrary size. Not null terminated.
+ uint32_t ddLevel;
+ uint8_t sysPhase;
+ uint8_t override;
+ uint8_t reserved1;
+ uint8_t reserved2;
+ char *metaData; // Arbitrary size. Extra bytes to next alignment are random or 0s.
+ uint32_t *rs4Launch; // Code. Must be 4-byte aligned. Actually should be 8-B align!
+ uint32_t *rs4Delta; // Data. Must be 8-byte aligned.
+ uint32_t *wfInline; // Code. Must be 4-byte aligned. Actually should be 8-B align!
+} DeltaRingLayout;
+
+typedef struct {
+ uint32_t sizeOfData;
+ char data[];
+} MetaData;
+
+int calc_ring_delta_state(
+ const uint32_t *i_init,
+ const uint32_t *i_alter,
+ uint32_t *o_delta,
+ const uint32_t i_ringLen);
+
+int create_wiggle_flip_prg(
+ uint32_t *i_deltaRing,
+ uint32_t i_ringBitLen,
+ uint32_t i_scanSelectData,
+ uint32_t i_chipletID,
+ uint32_t **o_wfInline,
+ uint32_t *o_wfInlineLenInWords,
+ uint8_t i_flushOptimization,
+ uint32_t i_scanMaxRotate,
+ uint32_t i_waitsScanDelay,
+ uint32_t i_ddLevel);
+
+uint64_t calc_ring_layout_entry_offset(
+ uint8_t i_typeRingLayout,
+ uint32_t i_sizeMetaData);
+
+int write_ring_block_to_image(
+ void *io_image,
+ const char *i_ringName, // NULL if no name.
+ DeltaRingLayout *i_ringBlock,
+ const uint8_t i_idxVector, // [0-15] - Ignored if ringName==NULL
+ const uint8_t i_override, // [0,1] - Ignored if ringName==NULL
+ const uint8_t i_overridable, // [0,1] - Ignored if ringName==NULL
+ const uint32_t i_sizeImageMax,
+ const uint8_t i_xipSectionId,
+ void *i_bufTmp,
+ const uint32_t i_sizeBufTmp);
+
+#if !(defined __CEN_XIP_CUSTOMIZE_C)
+
+int p8_centaur_build(
+ void *i_imageIn,
+ uint32_t i_ddLevel,
+ void *i_imageOut,
+ uint32_t i_sizeImageOutMax);
+
+int p8_ipl_build(
+ void *i_imageIn,
+ uint32_t i_ddLevel,
+ void *i_imageOut,
+ uint32_t i_sizeImageOutMax);
+
+int get_ring_layout_from_image2(
+ const void *i_imageIn,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ DeltaRingLayout **o_rs4RingLayout,
+ void **nextRing,
+ uint8_t i_xipSectionId);
+
+int gen_ring_delta_state(
+ uint32_t bitLen,
+ uint32_t *i_init,
+ uint32_t *i_alter,
+ uint32_t *o_delta,
+ uint32_t verbose);
+
+int write_rs4_ring_to_ref_image(
+ char *i_fnImage,
+ CompressedScanData *i_RS4,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ uint8_t i_override,
+ uint8_t i_ringType,
+ char *i_varName,
+ char *i_fnMetaData,
+ void *i_bufTmp,
+ uint32_t i_sizeBufTmp,
+ uint32_t verbose);
+
+int write_vpd_ring_to_ipl_image(
+ void *io_image,
+ uint32_t &io_sizeImageOut,
+ CompressedScanData *i_bufRs4Ring,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ char *i_ringName,
+ void *i_bufTmp,
+ uint32_t i_sizeBufTmp,
+ uint8_t i_xipSection);
+
+int write_vpd_ring_to_slw_image(
+ void *io_image,
+ uint32_t &io_sizeImageOut,
+ CompressedScanData *i_bufRs4Ring,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ char *i_ringName,
+ void *i_bufTmp,
+ uint32_t i_sizeBufTmp,
+ uint8_t i_bWcSpace);
+
+int check_and_perform_ring_datacare(
+ void *i_imageRef,
+ void *io_buf1,
+ uint8_t i_ddLevel,
+ uint8_t i_sysPhase,
+ char *i_ringName,
+ void *i_buf2,
+ uint32_t i_sizeBuf2);
+
+int get_delta_ring_from_image(
+ char *i_fnImage,
+ char *i_varName,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ uint8_t i_override,
+ MetaData **o_metaData,
+ CompressedScanData **o_deltaRingRS4,
+ uint32_t verbose);
+
+int write_wiggle_flip_to_image(
+ void *io_imageOut,
+ uint32_t *i_sizeImageMaxNew,
+ DeltaRingLayout *i_ringLayout,
+ uint32_t *i_wfInline,
+ uint32_t i_wfInlineLenInWords);
+
+int get_ring_layout_from_image(
+ const void *i_imageIn,
+ uint32_t i_ddLevel,
+ uint8_t i_sysPhase,
+ DeltaRingLayout *o_rs4RingLayout,
+ void **nextRing);
+
+int append_empty_section(
+ void *io_image,
+ int *i_sizeImageMaxNew,
+ uint32_t i_sectionId,
+ int *i_sizeSection,
+ uint8_t i_bFixed);
+
+int initialize_slw_section(
+ void *io_image,
+ uint32_t *i_sizeImageMaxNew);
+
+int create_and_initialize_fixed_image(
+ void *io_image);
+
+int update_runtime_scom_pointer(
+ void *io_image);
+
+void cleanup(
+ void *buf1=NULL,
+ void *buf2=NULL,
+ void *buf3=NULL,
+ void *buf4=NULL,
+ void *buf5=NULL);
+
+#endif // End of !(defined __CEN_XIP_CUSTOMIZE_C)
+
+#endif // End of !(defined __P8_PORE_TABLE_GEN_API_C) && !(defined SLW_COMMAND_LINE_RAM)
+
+// Byte-reverse a 32-bit integer if on an LE machine
+static inline uint32_t myRev32(const uint32_t i_x)
+{
+ uint32_t rx;
+
+#ifdef _BIG_ENDIAN
+ rx = i_x;
+#else
+ uint8_t *pix = (uint8_t*)(&i_x);
+ uint8_t *prx = (uint8_t*)(&rx);
+
+ prx[0] = pix[3];
+ prx[1] = pix[2];
+ prx[2] = pix[1];
+ prx[3] = pix[0];
+#endif
+
+ return rx;
+}
+
+// Byte-reverse a 64-bit integer if on a little-endian machine
+static inline uint64_t myRev64(const uint64_t i_x)
+{
+ uint64_t rx;
+
+#ifdef _BIG_ENDIAN
+ rx = i_x;
+#else
+ uint8_t *pix = (uint8_t*)(&i_x);
+ uint8_t *prx = (uint8_t*)(&rx);
+
+ prx[0] = pix[7];
+ prx[1] = pix[6];
+ prx[2] = pix[5];
+ prx[3] = pix[4];
+ prx[4] = pix[3];
+ prx[5] = pix[2];
+ prx[6] = pix[1];
+ prx[7] = pix[0];
+#endif
+
+ return rx;
+}
+
+// N-byte align an address, offset or size (aos)
+static inline uint64_t myByteAlign( const uint8_t nBytes, const uint64_t aos)
+{
+ return ((aos+nBytes-1)/nBytes)*nBytes;
+}
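+
+// Usage sketch (illustrative, not part of the original API; the helper name
+// is made up): advance a byte cursor past a big-endian length field and round
+// up to the next 8-byte boundary, as the ring-block layout rules above require.
+static inline uint64_t myAdvanceCursor8(const void *i_pLenBE, uint64_t i_cursor)
+{
+    uint32_t len = myRev32(*(const uint32_t *)i_pLenBE); // BE -> host order
+    return myByteAlign(8, i_cursor + len);               // next 8-B boundary
+}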
+
+#if( defined(__cplusplus) && !defined(PLIC_MODULE) )
+}
+#endif
diff --git a/libpore/p8_image_help_base.H b/libpore/p8_image_help_base.H
new file mode 100644
index 0000000..4662641
--- /dev/null
+++ b/libpore/p8_image_help_base.H
@@ -0,0 +1,125 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_image_help_base.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: p8_image_help_base.H,v 1.18 2013/06/10 22:08:20 jeshua Exp $
+//------------------------------------------------------------------------------
+// Title: p8_image_help_base.H
+// Description: Contains the most basic structures and defines needed for
+// image building and interpretation.
+//------------------------------------------------------------------------------
+#ifndef _P8_IMAGE_HELP_BASE_H_
+#define _P8_IMAGE_HELP_BASE_H_
+
+#include <sbe_xip_image.h>
+
+//
+// Various image/ring buffer sizes. Must be used by all users (VBU, FSP, HB, HBI, Cronus)
+//
+const uint32_t MAX_REF_IMAGE_SIZE = 5000000; // Max reference image size.
+const uint32_t FIXED_SEEPROM_WORK_SPACE= 128*1024; // Max work space for Seeprom img.
+const uint32_t MAX_SEEPROM_IMAGE_SIZE = 56*1024; // Max Seeprom image size.
+// Fixed SLW image size (Ensure 128-byte alignment.)
+const uint32_t FIXED_SLW_IMAGE_SIZE = 1024*1024; // Fixed SLW image size for _fixed.
+const uint32_t FIXED_RING_BUF_SIZE = 60000; // Fixed ring buf size for _fixed.
+
+const uint8_t MAX_VPD_TYPES = 2; // #G and #R, so far.
+#define CHIPLET_ID_MIN 0x00
+#define CHIPLET_ID_MAX 0x1F
+#define CHIPLET_ID_EX_MIN 0x10
+#define CHIPLET_ID_EX_MAX 0x1F
+const uint8_t MAX_CHIPLETS = CHIPLET_ID_MAX-CHIPLET_ID_MIN+1;
+const uint32_t ASM_RS4_LAUNCH_BUF_SIZE = 24; // Byte size of RS4 launch buffer.
+const uint32_t WF_ENCAP_SIZE = 400; // Byte size of WF encapsulation.
+ // (Actually, only 304B but may change.)
+const uint32_t WF_WORST_CASE_SIZE_FAC  = 4;     // Worst-case WF size = 3x ring length
+                                                // (assuming 12B per write), or 4x
+                                                // when waits instructions are used.
+const uint32_t LISTING_STRING_SIZE = 256;
+const uint64_t MAX_UINT64_T = (uint64_t)0xFFFFFFFF<<32 | (uint64_t)0xFFFFFFFF;
+
+const uint8_t RING_SECTION_ID[] = {
+ SBE_XIP_SECTION_RINGS,
+ SBE_XIP_SECTION_DCRINGS,
+};
+const uint8_t RING_SECTION_ID_SIZE = sizeof(RING_SECTION_ID) / sizeof(RING_SECTION_ID[0]);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Base (shared) ring layout for both RS4 and Wiggle-flip layouts.
+typedef struct {
+ uint64_t entryOffset;
+ uint64_t backItemPtr;
+ uint32_t sizeOfThis;
+ uint32_t sizeOfMeta; // Exact size of meta data. Arbitrary size. Not null terminated.
+} BaseRingLayout;
+
+// RS4 specific layout.
+typedef struct {
+ uint64_t entryOffset;
+ uint64_t backItemPtr;
+ uint32_t sizeOfThis;
+ uint32_t sizeOfMeta; // Exact size of meta data. Arbitrary size. Not null terminated.
+ uint32_t ddLevel;
+ uint8_t sysPhase;
+ uint8_t override;
+ uint8_t reserved1;
+ uint8_t reserved2;
+} Rs4RingLayout;
+
+// PairingInfo is used for pairing, or matching, a back pointer address of a
+// ring block with its corresponding TOC name.
+typedef struct {
+ uint64_t address; // (in) Holds PORE backPtr addr of the ring
+ uint8_t vectorpos; // (in) Vector position of fwdPtr [0;31]
+ // max=0 for most VPD rings
+ // max=1 for all non-VPD rings
+ // max=1 for perv_ VPD rings
+ // max=15 for most VPD ex_ rings
+ // max=31 for 16 ex_ chiplets with override
+ char *name; // (out) TOC name
+ uint8_t isvpd; // (out) 0: Non-VPD ring 1: VPD ring
+ uint8_t overridable; // (out) 0: No (most VPD rings) 1: Yes (all non-VPD rings)
+ uint8_t override; // (out) 0: base 1: override
+} PairingInfo;
+
+
+///
+/// ****************************************************************************
+/// Function declares.
+/// ****************************************************************************
+///
+int over_write_ring_data_in_image( void *io_image,
+ const char *i_ringName,
+ const void *i_ringData, // WF or RS4
+ const uint32_t i_sizeRingData, // Byte size
+ const uint8_t i_idxVector,
+ const uint8_t i_override,
+ const uint8_t i_overridable );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //_P8_IMAGE_HELP_BASE_H_
diff --git a/libpore/p8_pore_api_custom.h b/libpore/p8_pore_api_custom.h
new file mode 100644
index 0000000..473030a
--- /dev/null
+++ b/libpore/p8_pore_api_custom.h
@@ -0,0 +1,141 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_pore_api_custom.h $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+/* $Id: p8_pore_api_custom.h,v 1.5 2012/05/22 21:25:21 cmolsen Exp $ */
+/* $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/utils/p8_pore_api_custom.h,v $ */
+
+#include <stdint.h> /* for uint32_t */
+#include <stdio.h> /* for printf */
+#if !defined(__HOSTBOOT_MODULE) && !defined(__SKIBOOT__)
+#include <netinet/in.h> /* for htonl */
+#endif
+
+/**
+ * This file should be modified by users to appropriately handle some
+ * environment-specific operations.
+ */
+
+
+/*********************************/
+/***** Logging and Tracing *****/
+/*********************************/
+/**
+ * All tracing functions assume printf-style formatting
+ */
+
+#ifndef __FAPI
+/* Trace an informational message */
+#define P8_PORE_ITRACE0(msg) printf("PORE> INFO: " msg "\n");
+#define P8_PORE_ITRACE1(msg, arg0) printf("PORE> INFO: " msg "\n", arg0);
+
+/* Trace an error message */
+#define P8_PORE_ETRACE0(msg) printf("PORE> ERROR: " msg "\n");
+#define P8_PORE_ETRACE1(msg, arg0) printf("PORE> ERROR: " msg "\n", arg0);
+#define P8_PORE_ETRACE2(msg, arg0, arg1) printf("PORE> ERROR: " msg "\n", arg0, arg1);
+#define P8_PORE_ETRACE3(msg, arg0, arg1, arg2) printf("PORE> ERROR: " msg "\n", arg0, arg1, arg2);
+#define P8_PORE_ETRACE4(msg, arg0, arg1, arg2, arg3) printf("PORE> ERROR: " msg "\n", arg0, arg1, arg2, arg3);
+#define P8_PORE_ETRACE5(msg, arg0, arg1, arg2, arg3, arg4) printf("PORE> ERROR: " msg "\n", arg0, arg1, arg2, arg3, arg4);
+#endif
+/* Used for debug, Cronus/FW should leave these empty */
+#define P8_PORE_DTRACE0(msg)
+#define P8_PORE_DTRACE1(msg, arg0)
+#define P8_PORE_DTRACE2(msg, arg0, arg1)
+#define P8_PORE_DTRACE3(msg, arg0, arg1, arg2)
+#define P8_PORE_DTRACE4(msg, arg0, arg1, arg2, arg3)
+
+/****** Following is only used for debug purposes ******/
+/* FW/Cronus should NOT include this section */
+/* DTRACE - Print debug statements to command line */
+/* FTRACE - Print text PORE instructions of cpureg setup to DEBUG_FILE */
+/*
+#define P8_PORE_DTRACE0(msg) printf("PORE> DEBUG: " msg "\n");
+#define P8_PORE_DTRACE1(msg, arg0) printf("PORE> DEBUG: " msg "\n", arg0);
+#define P8_PORE_DTRACE2(msg, arg0, arg1) printf("PORE> DEBUG: " msg "\n", arg0, arg1);
+#define P8_PORE_DTRACE3(msg, arg0, arg1, arg2) printf("PORE> DEBUG: " msg "\n", arg0, arg1, arg2);
+#define P8_PORE_DTRACE4(msg, arg0, arg1, arg2, arg3) printf("PORE> DEBUG: " msg "\n", arg0, arg1, arg2, arg3);
+*/
+
+/**********************************/
+/***** Endian-ness Handling *****/
+/**********************************/
+/**
+ * Handle byte-swapping if necessary
+ */
+
+/* Default to big-endian format on both sides */
+#define P8_PORE_HOST_TO_BIG32( bit32_int ) htonl(bit32_int)
+#define P8_PORE_BIG32_TO_HOST( bit32_int ) ntohl(bit32_int)
+#define P8_PORE_HOST_TO_BIG16( bit16_int ) htons(bit16_int) /* 16-bit swap needs htons, not htonl */
+#define P8_PORE_BIG16_TO_HOST( bit16_int ) ntohs(bit16_int)
+
+/*
+*************** Do not edit this area ***************
+This section is automatically updated by CVS when you check in this file.
+Be sure to create CVS comments when you commit so that they can be included here.
+
+$Log: p8_pore_api_custom.h,v $
+Revision 1.5 2012/05/22 21:25:21 cmolsen
+Updated to remove FAPI tracing, which is not allowed in plain C files.
+
+Revision 1.4 2012/05/21 14:45:41 cmolsen
+Updated to address Gerrit review II comments about printf() usage.
+
+Revision 1.3 2012/05/15 19:53:38 cmolsen
+Updated to address Gerrit review comments:
+- Hostboot doesn't support printf().
+
+Revision 1.2 2012/04/13 16:45:32 cmolsen
+Includes __HOSTBOOT_MODULE exclude of <netinit/in.h>
+
+Revision 1.1 2011/08/25 12:28:38 yjkim
+initial check in
+
+Revision 1.10 2010/08/30 23:27:17 schwartz
+Added TRACE statements to include specified number of arguments
+Defined branch type constants
+Added constant for last scom op used to check if operation input to gen_scan is valid
+Added mult spr error constant
+Added p7p_pore_gen_wait API
+Changed additional C++ style comments to C style
+Initialized all variables to 0
+Removed FTRACE statements
+Added additional information to trace statements
+Updated gen_scom to use the defined operation constants
+Updated branch gen_relbranch to use defined branch type constants
+Added rc check for calls to p7p_pore_gen_cpureg_status and p7p_pore_span_128byte_boundary subroutines
+
+Revision 1.9 2010/08/30 14:57:54 schwartz
+Removed FTRACE and associated #define statements
+Changed TRACE macros to multiple macros with specified number of args
+
+Revision 1.6 2010/08/26 15:13:34 schwartz
+Fixed more C++ style comments to C style comments
+
+Revision 1.5 2010/06/23 23:06:37 schwartz
+Defined additional trace functions to be used for debugging, not in FW or Cronus
+
+Revision 1.4 2010/05/24 02:34:07 schwartz
+Fixed errors that appear when using -Werrors flag
+Added in cvs logging (hopefully)
+
+
+*/
diff --git a/libpore/p8_pore_table_gen_api.H b/libpore/p8_pore_table_gen_api.H
new file mode 100644
index 0000000..9846dfd
--- /dev/null
+++ b/libpore/p8_pore_table_gen_api.H
@@ -0,0 +1,438 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_pore_table_gen_api.H $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: p8_pore_table_gen_api.H,v 1.27 2014/06/02 18:21:55 cmolsen Exp $
+/*------------------------------------------------------------------------------*/
+/* *! (C) Copyright International Business Machines Corp. 2012 */
+/* *! All Rights Reserved -- Property of IBM */
+/* *! *** IBM Confidential *** */
+/*------------------------------------------------------------------------------*/
+/* *! TITLE : p8_pore_table_gen_api.H */
+/* *! DESCRIPTION : Contains all external APIs used by firmware (PHYP) to */
+// generate/modify the P8 PORE SLW image with Ramming and
+// Scomming specific instructions to be executed on exit from
+// Sleep/Winkle. Also contains definitions for the ramming
+// PORE code.
+/* *! OWNER NAME : Michael Olsen Email: cmolsen@us.ibm.com */
+//
+/* *! COMMENTS : *** VERY IMPORTANT *** */
+// The "Shared RAM section", the "Pore RAM section" and the
+// "C-code RAM section" must closely match eachother.
+//
+/*------------------------------------------------------------------------------*/
+
+#ifndef _P8_PORE_TABLE_GEN_API_H
+#define _P8_PORE_TABLE_GEN_API_H
+
+/********************************************************************/
+/* Shared RAM section - begin */
+/* This section MUST perfectly match the "Pore/C-code RAM section". */
+/********************************************************************/
+// Header defs (P8&PORE 64-bit notation where bits are numbered from left-to-right).
+// (Some of these defs are used in the c-specific section further down.)
+// -----------------------------------------------------------------------------
+// Note: SPR register numbers have a swizzle about them per PPC architecture
+// spr(instruction) <- spr5:9 || spr0:4
+//
+// For the PGAS routine, it is assumed that the API does the swizzling upon
+// building the instruction held in this structure
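+// e.g., SPR 304 (HSPRG0) swizzles to (304>>5) | ((304 & 0x1f)<<5) = 521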
+//
+// Header configuration: CPU Register Operation Header
+// 0 - End: 1=End; 0=More
+// 1 - Reserved
+// 2:3 - Type
+// 00: MTSPR
+// 01: MTMSRD
+// 10: Reserved
+// 11: Reserved
+// 4:13 - SPR number in non-swizzled form (0:9)
+// 14:15 - Reserved for SPR number expansion
+// 16:18 - Thread ID
+// 19:31 - Reserved
+
+#define RAM_HEADER_END_START 0
+#define RAM_HEADER_END_MASK BITS(RAM_HEADER_END_START,1)
+#define RAM_HEADER_TYPE_START 2
+#define RAM_HEADER_TYPE_MASK BITS(RAM_HEADER_TYPE_START,2)
+#define RAM_HEADER_SPRN_START 4
+#define RAM_HEADER_SPRN_MASK BITS(RAM_HEADER_SPRN_START,10)
+#define RAM_HEADER_THREAD_START 16
+#define RAM_HEADER_THREAD_MASK BITS(RAM_HEADER_THREAD_START,3)
+#define RAM_INSTR_START 32
+#define RAM_INSTR_MASK BITS(RAM_INSTR_START,32)
+// MTSPR instr defs
+#define RAM_MTSPR_INSTR_TEMPL ( ( (uint64_t)31<<(63-5) | (uint64_t)467<<(63-30) ) )
+#define RAM_MTSPR_SPR_START 11
+#define RAM_MTSPR_SPR_MASK BITS(RAM_MTSPR_SPR_START,10)
+// Thread align defs
+#define RAM_HEADER_THREAD_RALIGN ( 61-16 ) // Right-shift amount to right-align the 3-bit thread field (bits 16:18 -> 61:63)
+#define RAM_HEADER_THREAD_LALIGN ( 61-16 ) // Left-shift amount for the reverse move
+/********************************************************************/
+/* Shared RAM section - end */
+/********************************************************************/
+
+
+#ifdef FOR_PORE_RAMMING
+
+// Thread status
+CONST_UINT64_T( PROC_RAS_STAT_10013002 , ULL(0x10013002) );
+
+// TCTL RAS Status (for each thread)
+// Note: the address is not included in the name to ease PGAS indexing
+// of these registers
+CONST_UINT64_T( EX_PERV_TCTL0_R_STAT , ULL(0x10013002) );
+CONST_UINT64_T( EX_PERV_TCTL1_R_STAT , ULL(0x10013012) );
+CONST_UINT64_T( EX_PERV_TCTL2_R_STAT , ULL(0x10013022) );
+CONST_UINT64_T( EX_PERV_TCTL3_R_STAT , ULL(0x10013032) );
+CONST_UINT64_T( EX_PERV_TCTL4_R_STAT , ULL(0x10013042) );
+CONST_UINT64_T( EX_PERV_TCTL5_R_STAT , ULL(0x10013052) );
+CONST_UINT64_T( EX_PERV_TCTL6_R_STAT , ULL(0x10013062) );
+CONST_UINT64_T( EX_PERV_TCTL7_R_STAT , ULL(0x10013072) );
+
+// Thread scratch registers
+// Note: the address is not included in the name to ease PGAS indexing
+// of these registers
+CONST_UINT64_T( EX_PERV_SCRATCH0 , ULL(0x10013283) );
+CONST_UINT64_T( EX_PERV_SCRATCH1 , ULL(0x10013284) );
+CONST_UINT64_T( EX_PERV_SCRATCH2 , ULL(0x10013285) );
+CONST_UINT64_T( EX_PERV_SCRATCH3 , ULL(0x10013286) );
+CONST_UINT64_T( EX_PERV_SCRATCH4 , ULL(0x10013287) );
+CONST_UINT64_T( EX_PERV_SCRATCH5 , ULL(0x10013288) );
+CONST_UINT64_T( EX_PERV_SCRATCH6 , ULL(0x10013289) );
+CONST_UINT64_T( EX_PERV_SCRATCH7 , ULL(0x1001328A) );
+
+// Ramming settings.
+CONST_UINT64_T( RAM_STATUS_REG_AFTER_RAM, 0x5000000000000000);
+CONST_UINT64_T( RAM_COMPLETE_POLLS, 0x0000000000000040);
+
+// mtspr/mfspr and mtmsrd/mfmsr opcodes, left-shifted 29 bits, ready for ramming.
+CONST_UINT64_T( MTSPR_SCRATCH0_GPR0_RAM_READY, (0x000000007C1543A6<<29));
+CONST_UINT64_T( MFSPR_GPR0_SCRATCH0_RAM_READY, (0x000000007C1542A6<<29));
+CONST_UINT64_T( MTMSRD_GPR0_RAM_READY, (0x000000007C000164<<29));
+CONST_UINT64_T( MFMSR_GPR0_RAM_READY, (0x000000007C0000A6<<29));
+
+// Predefined MSR content during Ramming
+CONST_UINT64_T( P8_PORE_MSR_DURING_RAM, (0x9000000002802000) );
+
+// "reset" value of SCRATCH0 to ensure it gets updated from GPR0
+CONST_UINT64_T( SCRATCH0_RESET_VALUE, (0xABBA99EBBA33DADA) );
+
+#ifdef __ASSEMBLER__
+
+/***********************************************************************/
+/* Pore RAM section - begin */
+/* This section MUST perfectly match the "Shared/C-code RAM section". */
+/***********************************************************************/
+.set RAM_HEADER, 0
+.set RAM_INSTR, 4
+.set RAM_DATA, 8
+.set RAM_ENTRY_LENGTH, 16
+/***********************************************************************/
+/* Pore RAM section - end */
+/***********************************************************************/
+
+#endif // __ASSEMBLER__
+
+
+#else // Not FOR_PORE_RAMMING
+
+
+//#include <stdio.h>
+#ifndef PPC_HYP
+#include <stdlib.h>
+#endif // PPC_HYP
+#ifndef __P8_PORE_TABLE_GEN_API_C
+#include <p8_pore_api_custom.h>
+#endif
+//#include <stdint.h>
+
+//#include <pore_bitmanip.H>
+// Defining local versions of BITS and BIT
+// Create a multi-bit mask of \a n bits starting at bit \a b
+#define BITS(b, n) ((ULL(0xffffffffffffffff) << (64 - (n))) >> (b))
+#define BITS32(b,n) (uint32_t)((ULL(0xffffffff) << (32 - (n))) >> (b))
+// Create a single bit mask at bit \a b
+#define BIT(b) BITS((b), 1)
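+// Example (informational): BITS(2,2) = 0x3000000000000000ull and
+// BIT(0) = 0x8000000000000000ull, i.e. bit 0 is the MSB in this notation.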
+
+// Header defs (C notation where bits are numbered from right-to-left, and reducing to 32-bit)
+#define RAM_HEADER_END_START_C ( 31-RAM_HEADER_END_START+1-1 )
+#define RAM_HEADER_END_MASK_C (uint32_t)(RAM_HEADER_END_MASK>>32)
+#define RAM_HEADER_TYPE_START_C ( 31-RAM_HEADER_TYPE_START+1-2 )
+#define RAM_HEADER_TYPE_MASK_C (uint32_t)(RAM_HEADER_TYPE_MASK>>32)
+#define RAM_HEADER_SPRN_START_C ( 31-RAM_HEADER_SPRN_START+1-10 )
+#define RAM_HEADER_SPRN_MASK_C (uint32_t)(RAM_HEADER_SPRN_MASK>>32)
+#define RAM_HEADER_THREAD_START_C ( 31-RAM_HEADER_THREAD_START+1-3 )
+#define RAM_HEADER_THREAD_MASK_C (uint32_t)(RAM_HEADER_THREAD_MASK>>32)
+// MTSPR instr defs
+#define RAM_MTSPR_INSTR_TEMPL_C ( ( (uint32_t)31<<(31-5) | (uint32_t)467<<(31-30) ) )
+#define RAM_MTSPR_SPR_START_C ( 31-RAM_MTSPR_SPR_START+1-10 )
+//#define RAM_MTSPR_SPR_MASK_C (uint32_t)(BITS(RAM_MTSPR_SPR_START,10)>>32)
+#define RAM_MTSPR_SPR_MASK_C (uint32_t)(RAM_MTSPR_SPR_MASK>>32)
+// MTMSRD instr def
+#define RAM_MTMSRD_INSTR_TEMPL_C ( ( (uint32_t)31<<(31-5) | (uint32_t)178<<(31-30) ) )
+
+/* Other defs needed for ramming and scomming */
+// TOC names
+#define SLW_HOST_REG_VECTOR_TOC_NAME "slw_host_reg_vector"
+#define SLW_HOST_SCOM_NC_VECTOR_TOC_NAME "slw_host_scom_nc_vector"
+#define SLW_HOST_SCOM_L2_VECTOR_TOC_NAME "slw_host_scom_l2_vector"
+#define SLW_HOST_SCOM_L3_VECTOR_TOC_NAME "slw_host_scom_l3_vector"
+
+// Defines for slw_build() to update "runtime_scom" pointers w/pointer to
+// "sub_slw_runtime_scom" subroutines at SLW image build time.
+#define HOST_RUNTIME_SCOM_TOC_NAME "host_runtime_scom" // Null 1st, then fill w/addr of SLW_RUNTIME_SCOM_TOC_NAME
+#define SLW_RUNTIME_SCOM_TOC_NAME "sub_slw_runtime_scom"
+
+// The following two provide TRANSITIONAL SUPPORT only. TO BE REMOVED ASAP.
+#define EX_ENABLE_RUNTIME_SCOM_TOC_NAME "ex_enable_runtime_scom"
+#define SLW_EX_ENABLE_RUNTIME_SCOM_TOC_NAME "sub_slw_ex_enable_runtime_scom"
+
+#define SCAN_MAX_ROTATE_38XXX_NAME "scan_max_rotate_38xxx"
+#define SCAN_ROTATE_DEFAULT 110 // Limit suggested by Tilman.
+#define SCAN_MAX_ROTATE 0x00000FE0
+#define SCAN_MAX_ROTATE_LONG 0x000FFFFF // All 1s in BITS 12->31.
+//#define SCAN_MAX_ROTATE_LONG 0x000000D0 // Experimental max val
+
+#define OVER_SAMPLING_POLL 10
+#define WAITS_POLL_MIN 32
+
+// RAM table defines
+#define XIPSIZE_RAM_ENTRY ( (sizeof(RamTableEntry)+7)/8*8 )
+#define SLW_MAX_CORES 16
+#define SLW_MAX_CPUREGS_CORE 10
+#define SLW_MAX_CPUREGS_THREADS 5
+#define SLW_CORE_THREADS 8
+#define SLW_MAX_CPUREGS_OPS ( SLW_MAX_CPUREGS_CORE + \
+ SLW_CORE_THREADS*SLW_MAX_CPUREGS_THREADS )
+#define SLW_RAM_TABLE_SPACE_PER_CORE ( SLW_MAX_CPUREGS_OPS * XIPSIZE_RAM_ENTRY )
+#define SLW_RAM_TABLE_SIZE ( SLW_MAX_CORES * SLW_RAM_TABLE_SPACE_PER_CORE )
+
+// SPR and MSR values for i_regName
+enum {
+ P8_SPR_HRMOR = 313,
+ P8_SPR_HMEER = 337,
+ P8_SPR_PMICR = 852,
+ P8_SPR_PMCR = 884,
+ P8_SPR_HID0 = 1008,
+ P8_SPR_HID1 = 1009,
+ P8_SPR_HID4 = 1012,
+ P8_SPR_HID5 = 1014,
+ P8_CORE_XTRA8 =10008,
+ P8_CORE_XTRA9 =10009,
+ P8_SPR_HSPRG0 = 304,
+ P8_SPR_LPCR = 318,
+ P8_MSR_MSR = 2000,
+ P8_THRD_XTRA3 =20003,
+ P8_THRD_XTRA4 =20004
+};
+
+// SCOM table defines - Common
+#define XIPSIZE_SCOM_ENTRY 16
+
+// SCOM table defines - Non-cache section
+#define SLW_MAX_SCOMS_NC 32
+#define SLW_SCOM_TABLE_SPACE_PER_CORE_NC ( (SLW_MAX_SCOMS_NC+1)*XIPSIZE_SCOM_ENTRY ) // Add 1 for RNNN IIS
+#define SLW_SCOM_TABLE_SIZE_NC ( SLW_MAX_CORES * SLW_SCOM_TABLE_SPACE_PER_CORE_NC )
+
+// SCOM table defines - L2 section
+#define SLW_MAX_SCOMS_L2 16
+#define SLW_SCOM_TABLE_SPACE_PER_CORE_L2 ( (SLW_MAX_SCOMS_L2+1)*XIPSIZE_SCOM_ENTRY ) // Add 1 for RNNN IIS
+#define SLW_SCOM_TABLE_SIZE_L2 ( SLW_MAX_CORES * SLW_SCOM_TABLE_SPACE_PER_CORE_L2 )
+
+// SCOM table defines - L3 section
+#define SLW_MAX_SCOMS_L3 16
+#define SLW_SCOM_TABLE_SPACE_PER_CORE_L3 ( (SLW_MAX_SCOMS_L3+1)*XIPSIZE_SCOM_ENTRY ) // Add 1 for RNNN IIS
+#define SLW_SCOM_TABLE_SIZE_L3 ( SLW_MAX_CORES * SLW_SCOM_TABLE_SPACE_PER_CORE_L3 )
+
+#define SLW_SCOM_TABLE_SIZE_ALL ( SLW_SCOM_TABLE_SIZE_NC + SLW_SCOM_TABLE_SIZE_L2 + SLW_SCOM_TABLE_SIZE_L3)
+
+// RAM and SCOM sub-section offsets from beginning of .slw section.
+#define SLW_RAM_TABLE_OFFSET 0
+#define SLW_SCOM_TABLE_OFFSET_NC (SLW_RAM_TABLE_OFFSET + SLW_RAM_TABLE_SIZE)
+#define SLW_SCOM_TABLE_OFFSET_L2 (SLW_SCOM_TABLE_OFFSET_NC + SLW_SCOM_TABLE_SIZE_NC)
+#define SLW_SCOM_TABLE_OFFSET_L3 (SLW_SCOM_TABLE_OFFSET_L2 + SLW_SCOM_TABLE_SIZE_L2)
+#define SLW_TABLE_SIZE_ALL (SLW_RAM_TABLE_SIZE + SLW_SCOM_TABLE_SIZE_ALL)
+
+// Enumeration of Scom sections in .slw section.
+enum {
+ P8_SCOM_SECTION_NC = 0,
+ P8_SCOM_SECTION_L2 = 1,
+ P8_SCOM_SECTION_L3 = 2,
+ P8_SCOM_SECTION_MAX_VALUE = 2
+};
+
+// SLW section size (ensure 128-byte alignment; outer parentheses guard use in expressions)
+#define FIXED_SLW_SECTION_SIZE ((SLW_TABLE_SIZE_ALL/128+(SLW_TABLE_SIZE_ALL%128+127)/128)*128)
+
+// FFDC section size (ensure 128-byte alignment)
+#define FIXED_FFDC_SECTION_SIZE (640*(SLW_MAX_CORES+1))
+
+// SCOM/CID masks and ranges
+#define P8_CID_EX_LOW 0x10 // Lowest EX chiplet addr
+#define P8_CID_EX_HIGH 0x1f // Highest EX chiplet addr
+
+// SCOM Operators
+#define P8_PORE_SCOM_FIRST_OP 0 // First supported Scom operation.
+#define P8_PORE_SCOM_APPEND 0 // Add Scom to end of table or at first PORE NOP
+ // instruction, whichever comes first.
+#define P8_PORE_SCOM_REPLACE 1 // Repl 1st matching Scom addr or treat as APPEND
+ // if Scom entry is not found.
+#define P8_PORE_SCOM_OR 2 // Overlay data onto existing Scom by bitwise OR.
+#define P8_PORE_SCOM_AND 3 // Overlay data onto existing Scom by bitwise AND.
+#define P8_PORE_SCOM_NOOP 4 // Replace existing Scom with a PORE NOP instruction,
+ // NNNN.
+#define P8_PORE_SCOM_RESET 5 // Delete all entries for given coreID. Replace with
+ // PORE RET instructions, RNNN.
+#define P8_PORE_SCOM_OR_APPEND 6 // Same as OR but treat as APPEND if Scom entry is
+ // not found.
+#define P8_PORE_SCOM_AND_APPEND 7 // Same as AND but treat as APPEND if Scom entry is
+ // not found.
+#define P8_PORE_SCOM_LAST_OP 7 // Last supported Scom operation.
+
+
+// Enumeration of SLW image build modes.
+enum {
+ P8_SLW_MODEBUILD_IPL = 0,
+ P8_SLW_MODEBUILD_REBUILD = 1,
+ P8_SLW_MODEBUILD_SRAM = 2,
+ P8_SLW_MODEBUILD_MAX_VALUE = 2
+};
+
+
+// Return codes
+#define SLW_RAM_SUCCESS 0
+#define SLW_RAM_HEADERS_NOT_SYNCED 1
+#define SLW_RAM_IMAGE_SIZE_MISMATCH 2
+#define SLW_RAM_TABLE_ENTRY_OVERFLOW 3
+#define SLW_RAM_CODE_ERROR 4
+#define SLW_RAM_INVALID_PARAMETER 5
+#define SLW_RAM_WARNING_TABLE_CONTAMINATION 6
+
+
+#ifndef PPC_HYP
+#ifdef __cplusplus
+extern "C" {
+#endif
+#endif // PPC_HYP
+
+/********************************************************************/
+/* C-code RAM section - begin */
+/* This section MUST perfectly match the "Shared/Pore RAM section". */
+/********************************************************************/
+typedef struct ram_instr_t {
+ uint32_t header;
+ uint32_t instr;
+ uint64_t data;
+} RamTableEntry;
+/********************************************************************/
+/* C-code RAM section - end */
+/********************************************************************/
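+
+// Illustrative sketch (an assumption, not part of the original API; the
+// helper name is made up): assembling a host-order RamTableEntry for an
+// mtspr, mirroring the logic in p8_pore_gen_cpureg_fixed(). The caller still
+// sets the header end bit and byte-reverses all fields to BE before writing
+// the entry into the image.
+static inline RamTableEntry exampleMakeMtsprEntry(uint32_t i_sprn,
+                                                  uint32_t i_threadId,
+                                                  uint64_t i_data)
+{
+    RamTableEntry e;
+    // PPC mtspr swizzle: spr5:9 || spr0:4
+    uint32_t sprSwiz = i_sprn>>5 | (i_sprn & 0x0000001f)<<5;
+    // Type field is 0 (MTSPR), so only the SPRN and thread ID are merged in.
+    e.header = ( i_sprn << RAM_HEADER_SPRN_START_C & RAM_HEADER_SPRN_MASK_C ) |
+               ( i_threadId << RAM_HEADER_THREAD_START_C & RAM_HEADER_THREAD_MASK_C );
+    e.instr  = RAM_MTSPR_INSTR_TEMPL_C |
+               ( sprSwiz<<RAM_MTSPR_SPR_START_C & RAM_MTSPR_SPR_MASK_C );
+    e.data   = i_data;
+    return e;
+}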
+
+// SLW supported SPR registers
+typedef struct {
+ const char *name;
+ uint32_t value;
+ uint32_t swizzled;
+} SlwSprRegs;
+
+extern const SlwSprRegs SLW_SPR_REGS[];
+extern const int SLW_SPR_REGS_SIZE;
+
+/* Name: p8_pore_gen_cpureg()
+ * Description: Populates ramming entries in the .slw section
+ * Parameter list: i_image - pointer to SLW mainstore image
+ * i_sizeImage - size of SLW mainstore image
+ * i_regName - unswizzled SPR register value
+ * i_regData - data to write to SPR register
+ * i_coreId - the core ID to operate on
+ * i_threadId - the thread ID to operate on
+ */
+uint32_t p8_pore_gen_cpureg(void *io_image,
+ uint32_t i_sizeImage,
+ uint32_t i_regName,
+ uint64_t i_regData,
+ uint32_t i_coreId,
+ uint32_t i_threadId);
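+
+// Usage sketch (hypothetical values): ram 0 into HSPRG0 on core 2, thread 0.
+//
+//   rc = p8_pore_gen_cpureg(image, sizeImage, P8_SPR_HSPRG0, 0x0ull, 2, 0);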
+
+/* Name: p8_pore_gen_scom()
+ * Description: Populates scom entries in the .slw section
+ * Parameter list: i_image - pointer to SLW mainstore image
+ * i_sizeImage - size of SLW mainstore image
+ * i_scomAddr - scom register address
+ * i_coreId - the core ID [0:15]
+ * i_scomData - 64-bit data to put in scom register
+ *                 i_operation - what to do with the scom data [0:7]
+ *                 i_section - SCOM section [0,1,2]
+ */
+uint32_t p8_pore_gen_scom(void *io_image,
+ uint32_t i_sizeImage,
+ uint32_t i_scomAddr,
+ uint32_t i_coreId,
+ uint64_t i_scomData,
+ uint32_t i_operation,
+ uint32_t i_section);
+
+
+/* Name: p8_pore_gen_cpureg_fixed()
+ * Description: Populates ramming entries in the .slw section
+ * Parameter list: i_image - pointer to SLW mainstore image
+ * i_modeBuild - 0: HB/IPL mode, 1: PHYP/Rebuild mode, 2: SRAM mode.
+ * i_sizeImage - size of SLW mainstore image
+ * i_regName - unswizzled SPR register value
+ * i_regData - data to write to SPR register
+ * i_coreId - the core ID to operate on
+ * i_threadId - the thread ID to operate on
+ */
+uint32_t p8_pore_gen_cpureg_fixed(void *io_image,
+ uint8_t i_modeBuild,
+ uint32_t i_regName,
+ uint64_t i_regData,
+ uint32_t i_coreId,
+ uint32_t i_threadId);
+
+/* Name: p8_pore_gen_scom_fixed()
+ * Description: Populates scom entries in the .slw section
+ * Parameter list: i_image - pointer to SLW mainstore image
+ * i_modeBuild - 0: HB/IPL mode, 1: PHYP/Rebuild mode, 2: SRAM mode.
+ * i_scomAddr - scom register address
+ * i_coreId - the core ID [0:15]
+ * i_scomData - 64-bit data to put in scom register
+ *                 i_operation - what to do with the scom data [0:7]
+ * i_section - 0: General Scoms, 1: L2 cache, 2: L3 cache
+ */
+uint32_t p8_pore_gen_scom_fixed(void *io_image,
+ uint8_t i_modeBuild,
+ uint32_t i_scomAddr,
+ uint32_t i_coreId,
+ uint64_t i_scomData,
+ uint32_t i_operation,
+ uint32_t i_section);
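+
+// Usage sketch (hypothetical address/data): append a SCOM for core 3 to the
+// non-cache sub-section of a fixed image built in IPL mode; a non-zero
+// return indicates failure.
+//
+//   rc = p8_pore_gen_scom_fixed(image, P8_SLW_MODEBUILD_IPL, 0x10013280,
+//                               3, 0x8000000000000000ull,
+//                               P8_PORE_SCOM_APPEND, P8_SCOM_SECTION_NC);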
+
+#ifndef PPC_HYP
+#ifdef __cplusplus
+}
+#endif
+#endif // PPC_HYP
+
+#endif // FOR_PORE_RAMMING
+
+#endif // _P8_PORE_TABLE_GEN_API_H
diff --git a/libpore/p8_pore_table_gen_api_fixed.C b/libpore/p8_pore_table_gen_api_fixed.C
new file mode 100644
index 0000000..60c0679
--- /dev/null
+++ b/libpore/p8_pore_table_gen_api_fixed.C
@@ -0,0 +1,844 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_pore_table_gen_api_fixed.C $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2013,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: p8_pore_table_gen_api_fixed.C,v 1.15 2014/05/30 20:31:24 cmolsen Exp $
+//
+/*------------------------------------------------------------------------------*/
+/* *! (C) Copyright International Business Machines Corp. 2012 */
+/* *! All Rights Reserved -- Property of IBM */
+/* *! *** IBM Confidential *** */
+/*------------------------------------------------------------------------------*/
+/* *! TITLE : p8_pore_table_gen_api_fixed.C */
+/* *! DESCRIPTION : PORE SLW table generation APIs                              */
+/* *! OWNER NAME : Michael Olsen Email: cmolsen@us.ibm.com */
+//
+/* *! USAGE : To build for PHYP command-line - */
+/* buildecmdprcd -C "p8_pore_table_gen_api_fixed.C" -c "p8_pore_table_static_data.c,sbe_xip_image.c,pore_inline_assembler.c" -u "SLW_COMMAND_LINE_RAM" p8_pore_table_gen_api_fixed_main.C */
+//
+/* *! COMMENTS : - The DYNAMIC_RAM_TABLE_PPD was dropped in v1.12 of this */
+/* code. See v1.12 for explanation and code implementation. */
+//
+/*------------------------------------------------------------------------------*/
+
+#define __P8_PORE_TABLE_GEN_API_C
+#include <p8_pore_api_custom.h>
+#include <p8_pore_table_gen_api.H>
+#include <p8_delta_scan_rw.h>
+
+/*
+// io_image - pointer to SLW image
+// i_modeBuild - 0: HB/IPL mode, 1: PHYP/Rebuild mode, 2: SRAM mode.
+// i_regName - unswizzled enum SPR value (NOT a name)
+// i_regData - data to write
+// i_coreId - core ID = [0:15]
+// i_threadId - thread to operate on = [0:7].
+*/
+uint32_t p8_pore_gen_cpureg_fixed( void *io_image,
+ uint8_t i_modeBuild,
+ uint32_t i_regName,
+ uint64_t i_regData,
+ uint32_t i_coreId,
+ uint32_t i_threadId)
+{
+ uint32_t rc=0, rcLoc=0, iCount=0;
+ int i=0, iReg=-1;
+ uint64_t xipSlwRamSection;
+ void *hostSlwRamSection;
+ void *hostSlwSectionFixed;
+ uint64_t xipRamTableThis;
+ void *hostRamVector;
+ void *hostRamTableThis=NULL;
+ void *hostRamEntryThis=NULL, *hostRamEntryNext=NULL;
+ uint8_t bNewTable=0, bFound=0;
+ uint8_t bEntryEnd=1, headerType=0;
+ SbeXipSection xipSection;
+ SbeXipItem xipTocItem;
+ RamTableEntry ramEntryThis, *ramEntryNext;
+ uint32_t sprSwiz=0;
+ uint8_t bReplaceEntry=0;
+ uint32_t headerNext=0;
+ uint32_t instrNext=0;
+
+
+ // -------------------------------------------------------------------------
+ // Validate Ramming parameters.
+ //
+ // ...check mode build
+ if (i_modeBuild>P8_SLW_MODEBUILD_MAX_VALUE) {
+ MY_ERR("modeBuild=%i invalid. Valid range is [0;%i].",
+ i_modeBuild,P8_SLW_MODEBUILD_MAX_VALUE);
+ rcLoc = 1;
+ }
+ // ...check register value
+ bFound = 0;
+ for (i=0;i<SLW_SPR_REGS_SIZE;i++) {
+ if (i_regName==SLW_SPR_REGS[i].value) {
+ bFound = 1;
+ iReg = i;
+ break;
+ }
+ }
+ if (!bFound) {
+ MY_ERR("Register value = %i is not supported.\n",i_regName);
+ MY_ERR("The following registers are supported:\n");
+ for (i=0;i<SLW_SPR_REGS_SIZE;i++)
+ MY_ERR("\t(%s,%i)\n",SLW_SPR_REGS[i].name,SLW_SPR_REGS[i].value);
+ rcLoc = 1;
+ }
+ // ...check core ID
+ if (i_coreId>=SLW_MAX_CORES) {
+ MY_ERR("Core ID = %i is not within valid range of [0;%i]\n",i_coreId,SLW_MAX_CORES-1);
+ rcLoc = 1;
+ }
+ // ...check thread ID
+ // - ensure it's zero if SPR is not thread scoped, i.e. if SPR is core scoped.
+// - error out if threadId exceeds the max number of threads.
+ if (i_regName!=P8_SPR_HSPRG0 && i_regName!=P8_SPR_LPCR && i_regName!=P8_MSR_MSR) {
+ i_threadId = 0;
+ }
+ if (i_threadId>=SLW_CORE_THREADS) {
+ MY_ERR("Thread ID = %i is not within valid range of [0;%i]\n",i_threadId,SLW_CORE_THREADS-1);
+ rcLoc = 1;
+ }
+ if (rcLoc)
+ return IMGBUILD_ERR_RAM_INVALID_PARM;
+ rcLoc = 0;
+
+ // -------------------------------------------------------------------------
+ // Check slw section location and size. (Mainly needed for fixed image.)
+ //
+ if (i_modeBuild==P8_SLW_MODEBUILD_IPL ||
+ i_modeBuild==P8_SLW_MODEBUILD_REBUILD) { // Fixed image.
+ hostSlwSectionFixed = (void*)( (uintptr_t)io_image +
+ FIXED_SLW_IMAGE_SIZE -
+ FIXED_FFDC_SECTION_SIZE -
+ FIXED_SLW_SECTION_SIZE );
+ // Even though we shouldn't call this api during a rebuild, it should be
+ // safe to do so in this particular case since none of the info requested
+ // is supposed to be moved during a rebuild.
+ rc = sbe_xip_get_section( io_image, SBE_XIP_SECTION_SLW, &xipSection);
+ if (rc) {
+ MY_ERR("Probably invalid section name for SBE_XIP_SECTION_SLW.\n");
+ return IMGBUILD_ERR_GET_SECTION;
+ }
+ hostSlwRamSection = (void*)((uintptr_t)io_image + xipSection.iv_offset);
+ if (hostSlwSectionFixed!=hostSlwRamSection) {
+ MY_ERR("hostSlwSectionFixed != hostSlwRamSection(from image api).\n");
+ return IMGBUILD_ERR_RAM_HDRS_NOT_SYNCED;
+ }
+ else {
+ MY_INF("hostSlwSectionFixed == hostSlwRamSection(from image api).\n");
+ }
+ }
+ else { // SRAM non-fixed image.
+ rc = sbe_xip_get_section( io_image, SBE_XIP_SECTION_SLW, &xipSection);
+ if (rc) {
+ MY_ERR("Probably invalid section name for SBE_XIP_SECTION_SLW.\n");
+ return IMGBUILD_ERR_GET_SECTION;
+ }
+ hostSlwRamSection = (void*)((uintptr_t)io_image + xipSection.iv_offset);
+ sbe_xip_host2pore( io_image, hostSlwRamSection, &xipSlwRamSection);
+ }
+
+ // -------------------------------------------------------------------------
+ // Cross check SPR register and table defines
+ //
+ if (SLW_SPR_REGS_SIZE!=(SLW_MAX_CPUREGS_CORE+SLW_MAX_CPUREGS_THREADS)) {
+ MY_ERR("Defines in *.H header file not in sync.\n");
+ return IMGBUILD_ERR_RAM_HDRS_NOT_SYNCED;
+ }
+ if (xipSection.iv_size!=FIXED_SLW_SECTION_SIZE) {
+ MY_ERR("Fixed SLW table size in *.H header file differs from SLW section size in image.\n");
+ MY_ERR("Check code or image version.\n");
+ return IMGBUILD_ERR_RAM_HDRS_NOT_SYNCED;
+ }
+
+ // -------------------------------------------------------------------------
+ // Summarize parameters and checking results.
+ //
+ MY_INF("Input parameter checks - OK\n");
+ MY_INF("\tMode build= %i\n",i_modeBuild);
+ MY_INF("\tRegister = (%s,%i)\n",SLW_SPR_REGS[iReg].name,SLW_SPR_REGS[iReg].value);
+ MY_INF("\tCore ID = %i\n",i_coreId);
+ MY_INF("\tThread ID = %i\n",i_threadId);
+ MY_INF("Image validation and size checks - OK\n");
+ MY_INF("\tSLW section size= %i\n",xipSection.iv_size);
+
+ // -------------------------------------------------------------------------
+ // Locate RAM vector and locate RAM table associated with "This" core ID.
+ //
+ if (i_modeBuild==P8_SLW_MODEBUILD_IPL ||
+ i_modeBuild==P8_SLW_MODEBUILD_REBUILD) { // Fixed image.
+ hostRamTableThis = (void*)( (uintptr_t)io_image +
+ FIXED_SLW_IMAGE_SIZE -
+ FIXED_FFDC_SECTION_SIZE -
+ FIXED_SLW_SECTION_SIZE +
+ SLW_RAM_TABLE_SPACE_PER_CORE*i_coreId );
+ if (*(uintptr_t*)hostRamTableThis) { // Table content NOT empty.
+ bNewTable = 0; // So, NOT new table.
+ }
+ else { // Table content empty.
+ bNewTable = 1; // So, new table.
+ }
+ }
+ else { // SRAM non-fixed image.
+ rc = sbe_xip_find( io_image, SLW_HOST_REG_VECTOR_TOC_NAME, &xipTocItem);
+ if (rc) {
+ MY_ERR("Probably invalid key word for SLW_HOST_REG_VECTOR_TOC_NAME.\n");
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ sbe_xip_pore2host( io_image, xipTocItem.iv_address, &hostRamVector);
+ xipRamTableThis = myRev64(*((uint64_t*)hostRamVector + i_coreId));
+ if (xipRamTableThis) {
+ sbe_xip_pore2host( io_image, xipRamTableThis, &hostRamTableThis);
+ bNewTable = 0;
+ }
+ else {
+ hostRamTableThis = (void*)( (uintptr_t)hostSlwRamSection +
+ SLW_RAM_TABLE_SPACE_PER_CORE*i_coreId );
+ bNewTable = 1;
+ }
+ }
+
+
+ // -------------------------------------------------------------------------
+ // Create most of the RAM entry, so it can be used to find a potential existing entry to
+ // replace. Postpone decision about bEntryEnd and assume it's zero for now (not end).
+ //
+ if (i_regName==P8_MSR_MSR) {
+ // ...make the MSR header
+ headerType = 0x1; // MTMSRD header.
+ ramEntryThis.header = ( ((uint32_t)headerType) << RAM_HEADER_TYPE_START_C & RAM_HEADER_TYPE_MASK_C ) |
+ ( i_threadId << RAM_HEADER_THREAD_START_C & RAM_HEADER_THREAD_MASK_C );
+ // ...make the MSR instr
+ ramEntryThis.instr = RAM_MTMSRD_INSTR_TEMPL_C;
+ }
+ else {
+ // ...make the SPR header
+ headerType = 0x0; // MTSPR header.
+ ramEntryThis.header = ( ((uint32_t)headerType) << RAM_HEADER_TYPE_START_C & RAM_HEADER_TYPE_MASK_C ) |
+ ( i_regName << RAM_HEADER_SPRN_START_C & RAM_HEADER_SPRN_MASK_C ) |
+ ( i_threadId << RAM_HEADER_THREAD_START_C & RAM_HEADER_THREAD_MASK_C );
+ // ...make the SPR instr
+ sprSwiz = i_regName>>5 | (i_regName & 0x0000001f)<<5;
+ if (sprSwiz!=SLW_SPR_REGS[iReg].swizzled) {
+ MY_ERR("Inconsistent swizzle rules implemented. Check code. Dumping data.\n");
+ MY_ERR("\tsprSwiz (on-the-fly-calc)=%i\n",sprSwiz);
+ MY_ERR("\tSLW_SPR_REGS[%i].swizzled=%i\n",iReg,SLW_SPR_REGS[iReg].swizzled);
+ return IMGBUILD_ERR_RAM_CODE;
+ }
+ ramEntryThis.instr = RAM_MTSPR_INSTR_TEMPL_C | ( ( sprSwiz<<RAM_MTSPR_SPR_START_C ) & RAM_MTSPR_SPR_MASK_C );
+ }
+ // ...make the data
+ ramEntryThis.data = i_regData;
+
+
+
+ // -------------------------------------------------------------------------
+ // Determine insertion point of new RAM entry, hostRamEntryThis. The possibilities are:
+ // - New table => First entry
+ // - Existing Ram entry => Replace said entry
+ // - Existing table, new Ram entry => Last entry
+ //
+ bReplaceEntry = 0;
+ if (bNewTable) {
+ // Append to beginning of agreed upon static Ram table position for this coreId.
+ bEntryEnd = 1;
+ ramEntryThis.header = ( ((uint32_t)bEntryEnd) << RAM_HEADER_END_START_C & RAM_HEADER_END_MASK_C ) |
+ ramEntryThis.header;
+ hostRamEntryThis = hostRamTableThis;
+ if (i_modeBuild==P8_SLW_MODEBUILD_SRAM) {
+ // Update RAM vector (since it is currently NULL)
+ *((uint64_t*)hostRamVector + i_coreId) =
+ myRev64( xipSlwRamSection + SLW_RAM_TABLE_SPACE_PER_CORE*i_coreId );
+ }
+ }
+ else {
+ // Append at end of existing Ram table for this coreId
+ // or
+ // Replace an existing Ram entry
+ hostRamEntryNext = hostRamTableThis;
+ ramEntryNext = (RamTableEntry*)hostRamEntryNext;
+ headerNext = myRev32(ramEntryNext->header);
+ instrNext = myRev32(ramEntryNext->instr);
+ iCount = 1;
+ // Examine all entries, except last entry.
+ while ((headerNext & RAM_HEADER_END_MASK_C)==0 && bReplaceEntry==0) {
+ if (iCount>=SLW_MAX_CPUREGS_OPS) {
+ MY_ERR("Bad table! Header end bit not found and RAM table full (=%i entries).\n",SLW_MAX_CPUREGS_OPS);
+ return IMGBUILD_ERR_RAM_TABLE_END_NOT_FOUND;
+ }
+ if (ramEntryThis.header==headerNext && ramEntryThis.instr==instrNext) {
+         // It's a replacement. Stop searching. Go do the replacement.
+ bReplaceEntry = 1;
+ hostRamEntryThis = hostRamEntryNext;
+ }
+ else {
+ hostRamEntryNext = (void*)((uint8_t*)hostRamEntryNext + XIPSIZE_RAM_ENTRY);
+ ramEntryNext = (RamTableEntry*)hostRamEntryNext;
+ headerNext = myRev32(ramEntryNext->header);
+ instrNext = myRev32(ramEntryNext->instr);
+ iCount++;
+ }
+ }
+ if (bReplaceEntry==0) {
+ // Examine the last entry.
+ if (headerNext & RAM_HEADER_END_MASK_C) {
+ // Now we know for sure that our new Ram entry will also be the last, either as a
+ // replace or append. So put the end bit into the new entry.
+ bEntryEnd = 1;
+ ramEntryThis.header = ( ((uint32_t)bEntryEnd) << RAM_HEADER_END_START_C & RAM_HEADER_END_MASK_C ) |
+ ramEntryThis.header;
+ // Determine if to replace or append.
+ if (ramEntryThis.header==headerNext && ramEntryThis.instr==instrNext) {
+               // It's a replacement, and it is legal to replace the very last Ram entry in a completely full table.
+ if (iCount<=SLW_MAX_CPUREGS_OPS) {
+ bReplaceEntry = 1;
+ hostRamEntryThis = hostRamEntryNext;
+ }
+ else {
+ MY_ERR("RAM table is full. Max %i entries allowed.\n",SLW_MAX_CPUREGS_OPS);
+ return IMGBUILD_ERR_RAM_TABLE_FULL;
+ }
+ }
+ else {
+               // It's an append. Make sure there's room for one more Ram entry.
+ if (iCount<SLW_MAX_CPUREGS_OPS) {
+                  // Zero out the end bit in the last entry's header (which will now be second to last).
+ ramEntryNext->header = ramEntryNext->header & myRev32(~RAM_HEADER_END_MASK_C);
+ hostRamEntryThis = (void*)((uint8_t*)hostRamEntryNext + XIPSIZE_RAM_ENTRY);
+ }
+ else {
+ MY_ERR("RAM table is full. Max %i entries allowed.\n",SLW_MAX_CPUREGS_OPS);
+ return IMGBUILD_ERR_RAM_TABLE_FULL;
+ }
+ }
+ }
+ else {
+ MY_ERR("We should never get here. Check code. Dumping data:\n");
+ MY_ERR("myRev32(ramEntryNext->header) = 0x%08x\n",myRev32(ramEntryNext->header));
+ MY_ERR("RAM_HEADER_END_MASK_C = 0x%08x\n",RAM_HEADER_END_MASK_C);
+ return IMGBUILD_ERR_RAM_CODE;
+ }
+ }
+ }
+
+
+ // Summarize new table entry data
+ MY_INF("New table entry data (host format):\n");
+ MY_INF("\theader = 0x%08x\n",ramEntryThis.header);
+ MY_INF("\tinstr = 0x%08x\n",ramEntryThis.instr);
+ MY_INF("\tdata = 0x%016llx\n",ramEntryThis.data);
+
+ // -------------------------------------------------------------------------
+ // Insert the new RAM entry into the table in BE format.
+ //
+ ramEntryNext = (RamTableEntry*)hostRamEntryThis;
+ // ...some redundant checking
+ if (bNewTable) {
+ // For any new table, the insertion location should be clean. We check for this here.
+ if (myRev32(ramEntryNext->header)!=0) {
+ MY_ERR("WARNING : Table entry location should be empty for a new table. Check code and image. Dumping data:\n");
+ MY_ERR("\theader = 0x%08x\n",myRev32(ramEntryNext->header));
+ MY_ERR("\tinstr = 0x%08x\n",myRev32(ramEntryNext->instr));
+ MY_ERR("\tdata = 0x%016llx\n",myRev64(ramEntryNext->data));
+ rc = IMGBUILD_WARN_RAM_TABLE_CONTAMINATION;
+ }
+ }
+ // ...insert the new Ram entry.
+ ramEntryNext->header = myRev32(ramEntryThis.header);
+ ramEntryNext->instr = myRev32(ramEntryThis.instr);
+ ramEntryNext->data = myRev64(ramEntryThis.data);
+
+ return rc;
+}
+
+
+/*
+// io_image - Pointer to SLW image.
+// i_modeBuild - 0: HB/IPL mode, 1: PHYP/Rebuild mode, 2: SRAM mode.
+// i_scomAddr - Scom address.
+// i_coreId - The core ID [0:15].
+// i_scomData - Data to write to scom register.
+// i_operation - What to do with the scom addr and data.
+// i_section - 0: General Scoms, 1: L2 cache, 2: L3 cache.
+*/
+uint32_t p8_pore_gen_scom_fixed(void *io_image,
+ uint8_t i_modeBuild,
+ uint32_t i_scomAddr,
+ uint32_t i_coreId, // [0:15]
+ uint64_t i_scomData,
+ uint32_t i_operation, // [0:7]
+ uint32_t i_section) // [0,1,2]
+{
+ uint32_t rc=0, rcLoc=0, iEntry=0;
+ uint32_t chipletId=0;
+ uint32_t operation=0;
+ uint32_t entriesCount=0, entriesMatch=0, entriesNOP=0;
+ void *hostSlwSection;
+ void *hostSlwSectionFixed;
+ uint64_t xipScomTableThis;
+ void *hostScomVector, *hostScomTableThis;
+ void *hostScomEntryNext; // running entry pointer
+ void *hostScomEntryMatch=NULL; // pointer to entry that matches scomAddr
+ void *hostScomEntryRET=NULL; // pointer to first return instr after table
+ void *hostScomEntryNOP=NULL; // pointer to first nop IIS
+ uint8_t bufIIS[XIPSIZE_SCOM_ENTRY], bufNOP[4], bufRET[4];
+ SbeXipSection xipSection;
+ SbeXipItem xipTocItem;
+ PoreInlineContext ctx;
+
+ // -------------------------------------------------------------------------
+ // Validate Scom parameters.
+ //
+ // ...check if valid Scom register (is there anything we can do here to check?)
+ // Skipping check. We blindly trust caller.
+ //
+ // ...check mode build
+ if (i_modeBuild>P8_SLW_MODEBUILD_MAX_VALUE) {
+ MY_ERR("modeBuild=%i invalid. Valid range is [0;%i].",
+ i_modeBuild,P8_SLW_MODEBUILD_MAX_VALUE);
+ rcLoc = 1;
+ }
+ // ...check Scom operation
+ if (i_operation>P8_PORE_SCOM_LAST_OP) {
+ MY_ERR("Scom operation = %i is not within valid range of [%d;%d]\n",
+ i_operation, P8_PORE_SCOM_FIRST_OP, P8_PORE_SCOM_LAST_OP);
+ rcLoc = 1;
+ }
+ // ...check that core ID corresponds to valid chiplet ID
+ chipletId = i_coreId + P8_CID_EX_LOW;
+ if (chipletId<P8_CID_EX_LOW || chipletId>P8_CID_EX_HIGH) {
+ MY_ERR("Chiplet ID = 0x%02x is not within valid range of [0x%02x;0x%02x]\n",
+ chipletId, P8_CID_EX_LOW, P8_CID_EX_HIGH);
+ rcLoc = 1;
+ }
+ if (rcLoc)
+ return IMGBUILD_ERR_SCOM_INVALID_PARM;
+ rcLoc = 0;
+
+ // -------------------------------------------------------------------------
+ // Check slw section location and size. (Mainly needed for fixed image.)
+ //
+ if (i_modeBuild==P8_SLW_MODEBUILD_IPL ||
+ i_modeBuild==P8_SLW_MODEBUILD_REBUILD) { // Fixed image.
+ hostSlwSectionFixed = (void*)( (uintptr_t)io_image +
+ FIXED_SLW_IMAGE_SIZE -
+ FIXED_FFDC_SECTION_SIZE -
+ FIXED_SLW_SECTION_SIZE );
+ // Even though we shouldn't call this api during a rebuild, it should be
+ // safe to do so in this particular case since none of the info requested
+ // is supposed to be moved during a rebuild.
+ rc = sbe_xip_get_section( io_image, SBE_XIP_SECTION_SLW, &xipSection);
+ if (rc) {
+ MY_ERR("Probably invalid section name for SBE_XIP_SECTION_SLW.\n");
+ return IMGBUILD_ERR_GET_SECTION;
+ }
+ hostSlwSection = (void*)((uintptr_t)io_image + xipSection.iv_offset);
+ if (hostSlwSectionFixed!=hostSlwSection) {
+ MY_ERR("hostSlwSectionFixed != hostSlwSection(from image api).\n");
+ return IMGBUILD_ERR_SCOM_HDRS_NOT_SYNCD;
+ }
+ else {
+ MY_INF("hostSlwSectionFixed == hostSlwSection(from image api).\n");
+ }
+ }
+ else { // SRAM non-fixed image.
+ rc = sbe_xip_get_section( io_image, SBE_XIP_SECTION_SLW, &xipSection);
+ if (rc) {
+ MY_ERR("Probably invalid section name for SBE_XIP_SECTION_SLW.\n");
+ return IMGBUILD_ERR_GET_SECTION;
+ }
+ hostSlwSection = (void*)((uintptr_t)io_image + xipSection.iv_offset);
+ }
+
+ // -------------------------------------------------------------------------
+ // Check .slw section size and cross-check w/header define.
+ //
+ if (xipSection.iv_size!=FIXED_SLW_SECTION_SIZE) {
+ MY_ERR("SLW table size in *.H header file (=%ld) differs from SLW section size in image (=%i).\n",FIXED_SLW_SECTION_SIZE,xipSection.iv_size);
+ MY_ERR("Check code or image version.\n");
+ return IMGBUILD_ERR_SCOM_HDRS_NOT_SYNCD;
+ }
+
+ // -------------------------------------------------------------------------
+ // Summarize parameters and checking results.
+ //
+ MY_INF("Input parameter checks - OK\n");
+ MY_INF("\tRegister = 0x%08x\n",i_scomAddr);
+ MY_INF("\tOperation = %i\n",i_operation);
+ MY_INF("\tSection = %i\n",i_section);
+ MY_INF("\tCore ID = %i\n",i_coreId);
+ MY_INF("Image validation and size checks - OK\n");
+ MY_INF("\tSLW section size= %i\n",xipSection.iv_size);
+
+ // -------------------------------------------------------------------------
+ // Locate Scom vector according to i_section and then locate Scom table
+ // associated with "This" core ID.
+ //
+ if (i_modeBuild==P8_SLW_MODEBUILD_IPL ||
+ i_modeBuild==P8_SLW_MODEBUILD_REBUILD) { // Fixed image.
+ switch (i_section) {
+ case P8_SCOM_SECTION_NC:
+ hostScomTableThis = (void*)( (uintptr_t)hostSlwSection +
+ SLW_RAM_TABLE_SIZE +
+ SLW_SCOM_TABLE_SPACE_PER_CORE_NC*i_coreId );
+ break;
+ case P8_SCOM_SECTION_L2:
+ hostScomTableThis = (void*)( (uintptr_t)hostSlwSection +
+ SLW_RAM_TABLE_SIZE +
+ SLW_SCOM_TABLE_SIZE_NC +
+ SLW_SCOM_TABLE_SPACE_PER_CORE_L2*i_coreId );
+ break;
+ case P8_SCOM_SECTION_L3:
+ hostScomTableThis = (void*)( (uintptr_t)hostSlwSection +
+ SLW_RAM_TABLE_SIZE +
+ SLW_SCOM_TABLE_SIZE_NC +
+ SLW_SCOM_TABLE_SIZE_L2 +
+ SLW_SCOM_TABLE_SPACE_PER_CORE_L3*i_coreId );
+ break;
+ default:
+ MY_ERR("Invalid value for i_section (=%i).\n",i_section);
+ MY_ERR("Valid values for i_section = [%i,%i,%i].\n",
+ P8_SCOM_SECTION_NC,P8_SCOM_SECTION_L2,P8_SCOM_SECTION_L3);
+ return IMGBUILD_ERR_SCOM_INVALID_SUBSECTION;
+ }
+ }
+ else { // SRAM non-fixed image.
+ switch (i_section) {
+ case P8_SCOM_SECTION_NC:
+ rc = sbe_xip_find( io_image, SLW_HOST_SCOM_NC_VECTOR_TOC_NAME, &xipTocItem);
+ if (rc) {
+ MY_ERR("Probably invalid key word for SLW_HOST_SCOM_NC_VECTOR_TOC_NAME.\n");
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ break;
+ case P8_SCOM_SECTION_L2:
+ rc = sbe_xip_find( io_image, SLW_HOST_SCOM_L2_VECTOR_TOC_NAME, &xipTocItem);
+ if (rc) {
+ MY_ERR("Probably invalid key word for SLW_HOST_SCOM_L2_VECTOR_TOC_NAME.\n");
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ break;
+ case P8_SCOM_SECTION_L3:
+ rc = sbe_xip_find( io_image, SLW_HOST_SCOM_L3_VECTOR_TOC_NAME, &xipTocItem);
+ if (rc) {
+ MY_ERR("Probably invalid key word for SLW_HOST_SCOM_L3_VECTOR_TOC_NAME.\n");
+ return IMGBUILD_ERR_KEYWORD_NOT_FOUND;
+ }
+ break;
+ default:
+ MY_ERR("Invalid value for i_section (=%i).\n",i_section);
+ MY_ERR("Valid values for i_section = [%i,%i,%i].\n",
+ P8_SCOM_SECTION_NC,P8_SCOM_SECTION_L2,P8_SCOM_SECTION_L3);
+ return IMGBUILD_ERR_SCOM_INVALID_SUBSECTION;
+ }
+ MY_INF("xipTocItem.iv_address = 0x%016llx\n",xipTocItem.iv_address);
+ sbe_xip_pore2host( io_image, xipTocItem.iv_address, &hostScomVector);
+ MY_INF("hostScomVector = 0x%016llx\n",(uint64_t)hostScomVector);
+ xipScomTableThis = myRev64(*((uint64_t*)hostScomVector + i_coreId));
+ MY_INF("xipScomTableThis = 0x%016llx\n",xipScomTableThis);
+ if (xipScomTableThis) {
+ sbe_xip_pore2host( io_image, xipScomTableThis, &hostScomTableThis);
+ }
+ else { // Should never be here.
+ MY_ERR("Code or image bug. Scom vector table entries should never be null.\n");
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ }
+
+ //
+ // Determine where to place/do the Scom action and whether the entry already exists.
+ // Insertion rules:
+ // - If the entry doesn't exist, insert at the first NOP. (If we didn't do
+ //    this, the table could overflow, since the max table size doesn't
+ //    include NOP entries.)
+ // - If no NOP found, insert at first RET.
+ //
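+ // Sketch of the rules above (illustrative only, not generated code):
+ //
+ //     entry 0: IIS for addr A       <-- REPLACE of addr A hits here
+ //     entry 1: NNNN (all NOPs)      <-- APPEND goes here first
+ //     entry 2: RNNN (first RET)     <-- APPEND goes here if no NOP exists
+ //     entry 3: RNNN
+ //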
+
+ //----------------------------------------------------------------------------
+ // 1. Create search strings for addr, nop and ret.
+ //----------------------------------------------------------------------------
+ // Note, the following IIS will also be used in case of
+ // - i_operation==append
+ // - i_operation==replace
+ pore_inline_context_create( &ctx, (void*)bufIIS, XIPSIZE_SCOM_ENTRY, 0, 0);
+ pore_LS( &ctx, P1, chipletId);
+ pore_STI( &ctx, i_scomAddr, P1, i_scomData);
+ if (ctx.error > 0) {
+ MY_ERR("pore_LS or _STI generated rc = %d", ctx.error);
+ return IMGBUILD_ERR_PORE_INLINE_ASM;
+ }
+ pore_inline_context_create( &ctx, (void*)bufRET, 4, 0, 0);
+ pore_RET( &ctx);
+ if (ctx.error > 0) {
+ MY_ERR("pore_RET generated rc = %d", ctx.error);
+ return IMGBUILD_ERR_PORE_INLINE_ASM;
+ }
+ pore_inline_context_create( &ctx, (void*)bufNOP, 4, 0, 0);
+ pore_NOP( &ctx);
+ if (ctx.error > 0) {
+ MY_ERR("pore_NOP generated rc = %d", ctx.error);
+ return IMGBUILD_ERR_PORE_INLINE_ASM;
+ }
+
+ //----------------------------------------------------------------------------
+ // 2. Search for addr and nop in relevant coreId table until first RET.
+ //----------------------------------------------------------------------------
+ // Note:
+ // - We go through ALL entries until first RET instr. We MUST find a RET instr,
+ // though we don't check for overrun until later. (Could be improved.)
+ // - Count the number of entries, incl the NOOPs, until we find a RET.
+ // - The STI(+SCOM_addr) opcode is in the 2nd word of the Scom entry.
+ // - For an append operation, if a NOP is found (before a RET, obviously), the
+ //   new SCOM replaces that NNNN sequence.
+ hostScomEntryNext = hostScomTableThis;
+ MY_INF("hostScomEntryNext (addr): 0x%016llx\n ",(uint64_t)hostScomEntryNext);
+ while (memcmp(hostScomEntryNext, bufRET, sizeof(uint32_t))) {
+ entriesCount++;
+ MY_INF("Number of SCOM entries: %i\n ",entriesCount);
+ // ...+1 skips the 1st word of the Scom entry (the LS instruction that
+ // loads the chiplet ID into P1); word 2 holds the STI opcode + SCOM address.
+ if (*((uint32_t*)bufIIS+1)==*((uint32_t*)hostScomEntryNext+1) && entriesMatch==0) {
+ hostScomEntryMatch = hostScomEntryNext;
+ entriesMatch++;
+ }
+ if (memcmp(hostScomEntryNext, bufNOP, sizeof(uint32_t))==0 && entriesNOP==0) {
+ hostScomEntryNOP = hostScomEntryNext;
+ entriesNOP++;
+ }
+ hostScomEntryNext = (void*)((uintptr_t)hostScomEntryNext + XIPSIZE_SCOM_ENTRY);
+ }
+ hostScomEntryRET = hostScomEntryNext; // The last EntryNext is always the first RET.
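+
+ // For reference, the entry layout implied by the IIS built above
+ // (sizes inferred from the code, not taken from a header):
+ //     bytes  0- 3 : LS P1, chipletId
+ //     bytes  4- 7 : STI opcode + SCOM address  <-- compared against bufIIS
+ //     bytes  8-15 : 8-byte SCOM data           <-- target of OR/AND below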
+
+ //----------------------------------------------------------------------------
+ // 3. Qualify (translate) operation and IIS.
+ //----------------------------------------------------------------------------
+ if (i_operation==P8_PORE_SCOM_APPEND)
+ {
+ operation = i_operation;
+ }
+ else if (i_operation==P8_PORE_SCOM_REPLACE)
+ {
+ if (hostScomEntryMatch)
+ // ... do a replace
+ operation = i_operation;
+ else
+ // ... do an append
+ operation = P8_PORE_SCOM_APPEND;
+ }
+ else if (i_operation==P8_PORE_SCOM_NOOP)
+ {
+ // ...overwrite earlier bufIIS from the search step
+ pore_inline_context_create( &ctx, (void*)bufIIS, XIPSIZE_SCOM_ENTRY, 0, 0);
+ pore_NOP( &ctx);
+ pore_NOP( &ctx);
+ pore_NOP( &ctx);
+ pore_NOP( &ctx);
+ if (ctx.error > 0) {
+ MY_ERR("*** _NOP generated rc = %d", ctx.error);
+ return IMGBUILD_ERR_PORE_INLINE_ASM;
+ }
+ operation = i_operation;
+ }
+ else if ( i_operation==P8_PORE_SCOM_AND ||
+ i_operation==P8_PORE_SCOM_OR )
+ {
+ operation = i_operation;
+ }
+ else if ( i_operation==P8_PORE_SCOM_AND_APPEND )
+ {
+ if (hostScomEntryMatch)
+ // ... do the AND on existing Scom
+ operation = P8_PORE_SCOM_AND;
+ else
+ // ... do an append (this better be to an _AND register type)
+ operation = P8_PORE_SCOM_APPEND;
+ }
+ else if ( i_operation==P8_PORE_SCOM_OR_APPEND )
+ {
+ if (hostScomEntryMatch)
+ // ... do the OR on existing Scom
+ operation = P8_PORE_SCOM_OR;
+ else
+ // ... do an append (this better be to an _OR register type)
+ operation = P8_PORE_SCOM_APPEND;
+ }
+ else if (i_operation==P8_PORE_SCOM_RESET)
+ {
+ // ... create RNNN instruction sequence.
+ pore_inline_context_create( &ctx, (void*)bufIIS, XIPSIZE_SCOM_ENTRY, 0, 0);
+ pore_RET( &ctx);
+ pore_NOP( &ctx);
+ pore_NOP( &ctx);
+ pore_NOP( &ctx);
+ if (ctx.error > 0) {
+ MY_ERR("***_RET or _NOP generated rc = %d", ctx.error);
+ return IMGBUILD_ERR_PORE_INLINE_ASM;
+ }
+ operation = i_operation;
+ }
+ else
+ {
+ MY_ERR("Scom operation = %i is not within valid range of [%d;%d]\n",
+ i_operation, P8_PORE_SCOM_FIRST_OP, P8_PORE_SCOM_LAST_OP);
+ return IMGBUILD_ERR_SCOM_INVALID_PARM;
+ }
+
+ //----------------------------------------------------------------------------
+ // 4. Check for overrun.
+ //----------------------------------------------------------------------------
+ // Note:
+ // - An entry count exceeding the max allocated entry count results in a code
+ //   error: the allocation is based on an agreed-upon max number of entries, so
+ //   either the code header file needs to change or the caller is not abiding
+ //   by the rules.
+ // - An entry count equal to the max allocated entry count is allowed for all
+ //   commands except APPEND (incl a REPLACE translated into an APPEND), which
+ //   returns the aforementioned code error.
+ // - The table can be full but still include NOOPs. If so, we can still APPEND,
+ //   since we append at the first occurrence of a NOOP or at the end of the
+ //   table (at the RET).
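+ // For example (illustrative; the actual max comes from the header): if
+ // SLW_MAX_SCOMS_NC were 32, an APPEND into a 32-entry NC table succeeds
+ // only if one of those entries is a NOOP, while a 33-entry table is
+ // rejected outright for any operation.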
+ switch (i_section) {
+ case P8_SCOM_SECTION_NC:
+ if ( ( (operation==P8_PORE_SCOM_APPEND && entriesCount==SLW_MAX_SCOMS_NC) &&
+ hostScomEntryNOP==NULL ) ||
+ entriesCount>SLW_MAX_SCOMS_NC )
+ {
+ MY_ERR("SCOM table NC is full. Max %i entries allowed.\n",SLW_MAX_SCOMS_NC);
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ break;
+ case P8_SCOM_SECTION_L2:
+ if ( ( (operation==P8_PORE_SCOM_APPEND && entriesCount==SLW_MAX_SCOMS_L2) &&
+ hostScomEntryNOP==NULL ) ||
+ entriesCount>SLW_MAX_SCOMS_L2 )
+ {
+ MY_ERR("SCOM table L2 is full. Max %i entries allowed.\n",SLW_MAX_SCOMS_L2);
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ break;
+ case P8_SCOM_SECTION_L3:
+ if ( ( (operation==P8_PORE_SCOM_APPEND && entriesCount==SLW_MAX_SCOMS_L3) &&
+ hostScomEntryNOP==NULL ) ||
+ entriesCount>SLW_MAX_SCOMS_L3 )
+ {
+ MY_ERR("SCOM table L3 is full. Max %i entries allowed.\n",SLW_MAX_SCOMS_L3);
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ break;
+ default:
+ MY_ERR("Invalid value for i_section (=%i).\n",i_section);
+ MY_ERR("Valid values for i_section = [%i,%i,%i].\n",
+ P8_SCOM_SECTION_NC,P8_SCOM_SECTION_L2,P8_SCOM_SECTION_L3);
+ return IMGBUILD_ERR_SCOM_INVALID_SUBSECTION;
+ }
+
+
+ // ---------------------------------------------------------------------------
+ // 5. Insert the SCOM.
+ // ---------------------------------------------------------------------------
+ // Assuming a pre-allocated Scom table (after the pre-allocated Ram table):
+ // - The table is pre-filled with RNNN IIS.
+ // - Each core Id has dedicated space, uniformly distributed by SLW_MAX_SCOMS_NC*
+ // XIPSIZE_SCOM_ENTRY.
+ // - Remember to check for more than SLW_MAX_SCOMS_NC entries!
+ switch (operation) {
+
+ case P8_PORE_SCOM_APPEND: // Append a Scom at first occurring NNNN or RNNN,
+ if (hostScomEntryNOP) {
+ // ... replace the NNNN
+ MY_INF("Append at NOP\n");
+ memcpy(hostScomEntryNOP,(void*)bufIIS,XIPSIZE_SCOM_ENTRY);
+ }
+ else if (hostScomEntryRET) {
+ // ... replace the RNNN
+ MY_INF("Append at RET\n");
+ memcpy(hostScomEntryRET,(void*)bufIIS,XIPSIZE_SCOM_ENTRY);
+ }
+ else {
+ // We should never be here.
+ MY_ERR("In case=_SCOM_APPEND: EntryRET=NULL is impossible. Check code.\n");
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ break;
+ case P8_PORE_SCOM_REPLACE: // Replace existing Scom with new data
+ if (hostScomEntryMatch) {
+ // ... do a vanilla replace
+ MY_INF("Replace existing Scom\n");
+ memcpy(hostScomEntryMatch,(void*)bufIIS,XIPSIZE_SCOM_ENTRY);
+ }
+ else {
+ // We should never be here.
+ MY_ERR("In case=_SCOM_REPLACE: EntryMatch=NULL is impossible. Check code.\n");
+ return IMGBUILD_ERR_CHECK_CODE;
+ }
+ break;
+ case P8_PORE_SCOM_NOOP:
+ if (hostScomEntryMatch) {
+ // ... do a vanilla replace
+ MY_INF("Replace existing Scom w/NOPs\n");
+ memcpy(hostScomEntryMatch,(void*)bufIIS,XIPSIZE_SCOM_ENTRY);
+ }
+ else {
+ MY_ERR("No Scom entry found to replace NOOPs with.\n");
+ return IMGBUILD_ERR_SCOM_ENTRY_NOT_FOUND;
+ }
+ break;
+ case P8_PORE_SCOM_OR: // Overlay Scom data onto existing data by bitwise OR
+ if (hostScomEntryMatch) {
+ // ... do an OR on the data (which is the 2nd DWord in the entry)
+ MY_INF("Overlay existing Scom - OR case\n");
+ *((uint64_t*)hostScomEntryMatch+1) =
+ *((uint64_t*)hostScomEntryMatch+1) | myRev64(i_scomData);
+ }
+ else {
+ MY_ERR("No Scom entry found to do OR operation with.\n");
+ return IMGBUILD_ERR_SCOM_ENTRY_NOT_FOUND;
+ }
+ break;
+ case P8_PORE_SCOM_AND: // Overlay Scom data onto existing data by bitwise AND
+ if (hostScomEntryMatch) {
+ // ... do an AND on the data (which is the 2nd DWord in the entry)
+ MY_INF("Overlay existing Scom - AND case\n");
+ *((uint64_t*)hostScomEntryMatch+1) =
+ *((uint64_t*)hostScomEntryMatch+1) & myRev64(i_scomData);
+ }
+ else {
+ MY_ERR("No Scom entry found to do AND operation with.\n");
+ return IMGBUILD_ERR_SCOM_ENTRY_NOT_FOUND;
+ }
+ break;
+ case P8_PORE_SCOM_RESET: // Reset (delete) table. Refill w/RNNN IIS.
+ MY_INF("Reset table\n");
+ hostScomEntryNext = hostScomTableThis;
+ for ( iEntry=0; iEntry<entriesCount; iEntry++) {
+ memcpy( hostScomEntryNext, (void*)bufIIS, XIPSIZE_SCOM_ENTRY);
+ hostScomEntryNext = (void*)((uintptr_t)hostScomEntryNext + XIPSIZE_SCOM_ENTRY);
+ }
+ break;
+ default:
+ MY_ERR("Impossible value of operation (=%i). Check code.\n",operation);
+ return IMGBUILD_ERR_CHECK_CODE;
+
+ } // End of switch(operation)
+
+ return rc;
+}
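+
+// Example call (illustrative only; the argument values below are invented
+// for demonstration, and the mode literal follows the parameter comment
+// block above):
+//
+//   uint32_t rc = p8_pore_gen_scom_fixed(image, 2 /* SRAM mode */,
+//                                        0x10013280, 4,
+//                                        0x8000000000000000ull,
+//                                        P8_PORE_SCOM_APPEND,
+//                                        P8_SCOM_SECTION_NC);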
diff --git a/libpore/p8_pore_table_static_data.c b/libpore/p8_pore_table_static_data.c
new file mode 100644
index 0000000..49b9d9a
--- /dev/null
+++ b/libpore/p8_pore_table_static_data.c
@@ -0,0 +1,60 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/p8_pore_table_static_data.c $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+
+// $Id: p8_pore_table_static_data.c,v 1.7 2013-05-23 21:10:38 dcrowell Exp $
+/*------------------------------------------------------------------------------*/
+/* *! (C) Copyright International Business Machines Corp. 2012 */
+/* *! All Rights Reserved -- Property of IBM */
+/* *! *** IBM Confidential *** */
+/*------------------------------------------------------------------------------*/
+/* *! TITLE : p8_pore_table_static_data */
+/* *! DESCRIPTION : Global static data declaration file. */
+/* *! OWNER NAME : Michael Olsen Email: cmolsen@us.ibm.com */
+//
+/* *! COMMENTS    : This file is exclusively for the PHYP environment.       */
+//
+/*------------------------------------------------------------------------------*/
+#include <p8_pore_table_gen_api.H>
+
+const SlwSprRegs SLW_SPR_REGS[] = {
+ /* name value swizzled */
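+ /* The swizzle swaps the two 5-bit halves of the 10-bit SPR number, as
+    required by the split SPR field of the mtspr/mfspr encoding. Worked
+    example for illustration (the SPR numbers come from the header): for
+    SPR 313, (313>>5) | ((313&0x1f)<<5) = 9 | (25<<5) = 809. */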
+ // ...core regs
+ { "P8_SPR_HRMOR", P8_SPR_HRMOR, ( P8_SPR_HRMOR >>5 | ( P8_SPR_HRMOR &0x1f)<<5 ) },
+ { "P8_SPR_HMEER", P8_SPR_HMEER, ( P8_SPR_HMEER >>5 | ( P8_SPR_HMEER &0x1f)<<5 ) },
+ { "P8_SPR_PMICR", P8_SPR_PMICR, ( P8_SPR_PMICR >>5 | ( P8_SPR_PMICR &0x1f)<<5 ) },
+ { "P8_SPR_PMCR", P8_SPR_PMCR, ( P8_SPR_PMCR >>5 | ( P8_SPR_PMCR &0x1f)<<5 ) },
+ { "P8_SPR_HID0", P8_SPR_HID0, ( P8_SPR_HID0 >>5 | ( P8_SPR_HID0 &0x1f)<<5 ) },
+ { "P8_SPR_HID1", P8_SPR_HID1, ( P8_SPR_HID1 >>5 | ( P8_SPR_HID1 &0x1f)<<5 ) },
+ { "P8_SPR_HID4", P8_SPR_HID4, ( P8_SPR_HID4 >>5 | ( P8_SPR_HID4 &0x1f)<<5 ) },
+ { "P8_SPR_HID5", P8_SPR_HID5, ( P8_SPR_HID5 >>5 | ( P8_SPR_HID5 &0x1f)<<5 ) },
+ { "P8_CORE_XTRA8", P8_CORE_XTRA8,( P8_CORE_XTRA8 ) },
+ { "P8_CORE_XTRA9", P8_CORE_XTRA9,( P8_CORE_XTRA9 ) },
+ // ...thread regs
+ { "P8_SPR_HSPRG0", P8_SPR_HSPRG0,( P8_SPR_HSPRG0>>5 | ( P8_SPR_HSPRG0&0x1f)<<5 ) },
+ { "P8_SPR_LPCR", P8_SPR_LPCR, ( P8_SPR_LPCR >>5 | ( P8_SPR_LPCR &0x1f)<<5 ) },
+ { "P8_MSR_MSR", P8_MSR_MSR, ( P8_MSR_MSR ) },
+ { "P8_THRD_XTRA3", P8_THRD_XTRA3,( P8_THRD_XTRA3 ) },
+ { "P8_THRD_XTRA4", P8_THRD_XTRA4,( P8_THRD_XTRA4 ) },
+};
+
+const int SLW_SPR_REGS_SIZE = sizeof(SLW_SPR_REGS)/sizeof(SLW_SPR_REGS[0]);
diff --git a/libpore/pgas.h b/libpore/pgas.h
new file mode 100644
index 0000000..3d985f8
--- /dev/null
+++ b/libpore/pgas.h
@@ -0,0 +1,1169 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pgas.h $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __PGAS_H__
+#define __PGAS_H__
+
+#define __PGAS__
+
+// $Id: pgas.h,v 1.21 2013/11/20 14:06:39 bcbrock Exp $
+
+// ** WARNING : This file is maintained as part of the OCC firmware. Do **
+// ** not edit this file in the PMX area, the hardware procedure area, **
+// ** or the PoreVe area as any changes will be lost. **
+
+/// \file pgas.h
+/// \brief Pore GAS
+///
+/// PGAS is documented in a separate standalone document entitled <em> PGAS :
+/// PORE GAS (GNU Assembler) User's and Reference Manual </em>.
+///
+/// This file defines support macros for the GNU PORE assembler, and the PORE
+/// inline assembler and disassembler which follow the PGAS assembly syntax.
+/// If the compile switch PGAS_PPC is defined in the environment then pgas.h
+/// includes pgas_ppc.h which transforms a PowerPC assembler into an assembler
+/// for PORE.
+
+// These are the opcodes and mnemonics as defined by the PORE hardware
+// manual. Many of them will change names slightly in PGAS.
+
+#define PORE_OPCODE_NOP 0x0f
+#define PORE_OPCODE_WAIT 0x01
+#define PORE_OPCODE_TRAP 0x02
+#define PORE_OPCODE_HOOK 0x4f
+
+#define PORE_OPCODE_BRA 0x10
+#define PORE_OPCODE_BRAZ 0x12
+#define PORE_OPCODE_BRANZ 0x13
+#define PORE_OPCODE_BRAI 0x51
+#define PORE_OPCODE_BSR 0x14
+#define PORE_OPCODE_BRAD 0x1c
+#define PORE_OPCODE_BSRD 0x1d
+#define PORE_OPCODE_RET 0x15
+#define PORE_OPCODE_CMPBRA 0x56
+#define PORE_OPCODE_CMPNBRA 0x57
+#define PORE_OPCODE_CMPBSR 0x58
+#define PORE_OPCODE_LOOP 0x1f
+
+#define PORE_OPCODE_ANDI 0x60
+#define PORE_OPCODE_ORI 0x61
+#define PORE_OPCODE_XORI 0x62
+
+#define PORE_OPCODE_AND 0x25
+#define PORE_OPCODE_OR 0x26
+#define PORE_OPCODE_XOR 0x27
+
+#define PORE_OPCODE_ADD 0x23
+#define PORE_OPCODE_ADDI 0x24
+#define PORE_OPCODE_SUB 0x29
+#define PORE_OPCODE_SUBI 0x28
+#define PORE_OPCODE_NEG 0x2a
+
+#define PORE_OPCODE_COPY 0x2c
+#define PORE_OPCODE_ROL 0x2e
+
+#define PORE_OPCODE_LOAD20 0x30
+#define PORE_OPCODE_LOAD64 0x71
+#define PORE_OPCODE_SCR1RD 0x32
+#define PORE_OPCODE_SCR1RDA 0x73
+#define PORE_OPCODE_SCR2RD 0x36
+#define PORE_OPCODE_SCR2RDA 0x77
+#define PORE_OPCODE_WRI 0x78
+#define PORE_OPCODE_BS 0x74
+#define PORE_OPCODE_BC 0x75
+#define PORE_OPCODE_SCR1WR 0x39
+#define PORE_OPCODE_SCR2WR 0x3a
+#define PORE_OPCODE_SCAND 0x7c
+
+
+// These are the PGAS versions of the PORE opcodes used in the legacy PGAS_PPC
+// assembler and the current PORE inline assembler/disassembler.
+
+#define PGAS_OPCODE_NOP PORE_OPCODE_NOP
+#define PGAS_OPCODE_WAITS PORE_OPCODE_WAIT
+#define PGAS_OPCODE_TRAP PORE_OPCODE_TRAP
+#define PGAS_OPCODE_HOOKI PORE_OPCODE_HOOK
+
+#define PGAS_OPCODE_BRA PORE_OPCODE_BRA
+#define PGAS_OPCODE_BRAZ PORE_OPCODE_BRAZ
+#define PGAS_OPCODE_BRANZ PORE_OPCODE_BRANZ
+#define PGAS_OPCODE_BRAI PORE_OPCODE_BRAI
+#define PGAS_OPCODE_BSR PORE_OPCODE_BSR
+#define PGAS_OPCODE_BRAD PORE_OPCODE_BRAD
+#define PGAS_OPCODE_BSRD PORE_OPCODE_BSRD
+#define PGAS_OPCODE_RET PORE_OPCODE_RET
+#define PGAS_OPCODE_CMPIBRAEQ PORE_OPCODE_CMPBRA
+#define PGAS_OPCODE_CMPIBRANE PORE_OPCODE_CMPNBRA
+#define PGAS_OPCODE_CMPIBSREQ PORE_OPCODE_CMPBSR
+#define PGAS_OPCODE_LOOP PORE_OPCODE_LOOP
+
+#define PGAS_OPCODE_ANDI PORE_OPCODE_ANDI
+#define PGAS_OPCODE_ORI PORE_OPCODE_ORI
+#define PGAS_OPCODE_XORI PORE_OPCODE_XORI
+
+#define PGAS_OPCODE_AND PORE_OPCODE_AND
+#define PGAS_OPCODE_OR PORE_OPCODE_OR
+#define PGAS_OPCODE_XOR PORE_OPCODE_XOR
+
+#define PGAS_OPCODE_ADD PORE_OPCODE_ADD
+#define PGAS_OPCODE_ADDS PORE_OPCODE_ADDI
+#define PGAS_OPCODE_SUB PORE_OPCODE_SUB
+#define PGAS_OPCODE_SUBS PORE_OPCODE_SUBI
+#define PGAS_OPCODE_NEG PORE_OPCODE_NEG
+
+#define PGAS_OPCODE_MR PORE_OPCODE_COPY
+#define PGAS_OPCODE_ROLS PORE_OPCODE_ROL
+
+#define PGAS_OPCODE_LS PORE_OPCODE_LOAD20
+#define PGAS_OPCODE_LI PORE_OPCODE_LOAD64
+#define PGAS_OPCODE_LD0 PORE_OPCODE_SCR1RD /* Used by LD */
+#define PGAS_OPCODE_LD0ANDI PORE_OPCODE_SCR1RDA /* Used by LDANDI */
+#define PGAS_OPCODE_LD1 PORE_OPCODE_SCR2RD /* Used by LD */
+#define PGAS_OPCODE_LD1ANDI PORE_OPCODE_SCR2RDA /* Used by LDANDI */
+#define PGAS_OPCODE_STI PORE_OPCODE_WRI
+#define PGAS_OPCODE_STD0 PORE_OPCODE_SCR1WR /* Used by STD */
+#define PGAS_OPCODE_STD1 PORE_OPCODE_SCR2WR /* Used by STD */
+#define PGAS_OPCODE_SCAND PORE_OPCODE_SCAND
+
+#ifdef IGNORE_HW274735
+
+// BSI and BCI are normally redacted due to HW274735. See also the bug
+// workarounds section below.
+
+#define PGAS_OPCODE_BSI PORE_OPCODE_BS
+#define PGAS_OPCODE_BCI PORE_OPCODE_BC
+
+#endif // IGNORE_HW274735
+
+// These are the programmer-visible register names as defined by the PORE
+// hardware manual. All of these names (except the PC) appear differently in
+// the PGAS syntax, in some cases to reduce confusion, in other cases just to
+// have more traditional short mnemonics.
+
+#define PORE_REGISTER_PRV_BASE_ADDR0 0x0
+#define PORE_REGISTER_PRV_BASE_ADDR1 0x1
+#define PORE_REGISTER_OCI_BASE_ADDR0 0x2
+#define PORE_REGISTER_OCI_BASE_ADDR1 0x3
+#define PORE_REGISTER_SCRATCH0 0x4
+#define PORE_REGISTER_SCRATCH1 0x5
+#define PORE_REGISTER_SCRATCH2 0x6
+#define PORE_REGISTER_ERROR_MASK 0x7
+#define PORE_REGISTER_EXE_TRIGGER 0x9
+#define PORE_REGISTER_DATA0 0xa
+#define PORE_REGISTER_PC 0xe
+#define PORE_REGISTER_IBUF_ID 0xf
+
+
+// PgP IBUF_ID values
+
+#define PORE_ID_GPE0 0x00
+#define PORE_ID_GPE1 0x01
+#define PORE_ID_SLW 0x08
+#define PORE_ID_SBE 0x04
+
+
+// Condition Codes
+
+#define PORE_CC_UGT 0x8000
+#define PORE_CC_ULT 0x4000
+#define PORE_CC_SGT 0x2000
+#define PORE_CC_SLT 0x1000
+#define PORE_CC_C 0x0800
+#define PORE_CC_V 0x0400
+#define PORE_CC_N 0x0200
+#define PORE_CC_Z 0x0100
+
+
+// Memory Spaces
+
+#define PORE_SPACE_UNDEFINED 0xffff
+#define PORE_SPACE_OCI 0x8000
+#define PORE_SPACE_PNOR 0x800b
+#define PORE_SPACE_OTPROM 0x0001
+#define PORE_SPACE_SEEPROM 0x800c
+#define PORE_SPACE_PIBMEM 0x0008
+
+
+#ifdef __ASSEMBLER__
+
+////////////////////////////////////////////////////////////////////////////
+// PGAS Base Assembler Support
+////////////////////////////////////////////////////////////////////////////
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Condition Codes
+ //////////////////////////////////////////////////////////////////////
+
+ .set CC_UGT, PORE_CC_UGT
+ .set CC_ULT, PORE_CC_ULT
+ .set CC_SGT, PORE_CC_SGT
+ .set CC_SLT, PORE_CC_SLT
+ .set CC_C, PORE_CC_C
+ .set CC_V, PORE_CC_V
+ .set CC_N, PORE_CC_N
+ .set CC_Z, PORE_CC_Z
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Utility Macros
+ //////////////////////////////////////////////////////////////////////
+
+ // 'Undefine' PowerPC mnemonics to trap programming errors
+
+ .macro ..undefppc1, i
+ .ifnc \i, ignore
+ .macro \i, args:vararg
+ .error "This is a PowerPC opcode - NOT a PGAS opcode or extended mnemonic"
+ .endm
+ .endif
+ .endm
+
+ .macro .undefppc, i0, i1=ignore, i2=ignore, i3=ignore
+ ..undefppc1 \i0
+ ..undefppc1 \i1
+ ..undefppc1 \i2
+ ..undefppc1 \i3
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Argument Checking Macros
+ //////////////////////////////////////////////////////////////////////
+ //
+ // These macros remain in the final pgas.h file because 1) they are
+ // required for some PGAS pseudo-ops, and 2) to support robust
+ // assembler macro definitions.
+
+ // Check an unsigned immediate for size
+
+ .macro ..checku, x:req, bits:req, err="Unsigned value too large"
+
+ .if (((\bits) <= 0) || ((\bits) > 63))
+ .error "The number of bits must be in the range 0 < bits < 64"
+ .endif
+
+ .iflt (\x)
+ .error "An unsigned value is required here"
+ .endif
+
+ .ifgt ((\x) - (0xffffffffffffffff >> (64 - (\bits))))
+ .error "\err"
+ .endif
+
+ .endm
+
+ // Check unsigned 16/22-bit immediates for size
+ //
+ // In general, PGAS can check immediate values for size restrictions,
+ // but unfortunately is not able to check address offset immediates for
+ // range.
+
+ .macro ..check_u16, u16
+ ..checku (\u16), 16, "Unsigned immediate is larger than 16 bits"
+ .endm
+
+ .macro ..check_u24, u24
+ ..checku (\u24), 24, "Unsigned immediate is larger than 24 bits"
+ .endm
+
+ // Check a 16/20/22-bit signed immediate for size
+
+ .macro ..check_s16, s16
+ .iflt \s16
+ .iflt \s16 + 0x8000
+ .error "Immediate value too small for a signed 16-bit field"
+ .endif
+ .else
+ .ifgt \s16 - 0x7fff
+ .error "Immediate value too large for a signed 16-bit field"
+ .endif
+ .endif
+ .endm
+
+ .macro ..check_s20, s20
+ .iflt \s20
+ .iflt \s20 + 0x80000
+ .error "Immediate value too small for a signed 20-bit field"
+ .endif
+ .else
+ .ifgt \s20 - 0x7ffff
+ .error "Immediate value too large for a signed 20-bit field"
+ .endif
+ .endif
+ .endm
+
+ .macro ..check_s22, s22
+ .iflt \s22
+ .iflt \s22 + 0x200000
+ .error "Immediate value too small for a signed 22-bit field"
+ .endif
+ .else
+ .ifgt \s22 - 0x1fffff
+ .error "Immediate value too large for a signed 22-bit field"
+ .endif
+ .endif
+ .endm
+
+ // Check a putative SCOM address for bits 0 and 8:11 == 0.
+
+ .macro ..check_scom, address
+ .if ((\address) & 0x80f00000)
+ .error "Valid SCOM addresses must have bits 0 and 8:11 equal to 0."
+ .endif
+ .endm
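+
+ // For example (illustrative): ..check_scom 0x02013f00 passes, while
+ // ..check_scom 0x00f00000 errors out because bits 8:11 are non-zero.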
+
+ // A register required to be D0
+
+ .macro ..d0, reg
+ .if (\reg != D0)
+ .error "Data register D0 is required here"
+ .endif
+ .endm
+
+ // A register pair required to be D0, D1 in order
+
+ .macro ..d0d1, reg1, reg2
+ .if (((\reg1) != D0) && ((\reg2) != D1))
+ .error "Register-Register ALU operations are only defined on the source pair D0, D1"
+ .endif
+ .endm
+
+ // A register pair required to be D0, D1 in any order
+ .macro ..dxdy, reg1, reg2, err="Expecting D0, D1 in either order"
+ .if !((((\reg1) == D0) && ((\reg2) == D1)) || \
+ (((\reg1) == D1) && ((\reg2) == D0)))
+ .error "\err"
+ .endif
+ .endm
+
+ // A register pair required to be A0, A1 in any order
+ .macro ..axay, reg1, reg2, err="Expecting A0, A1 in either order"
+ .if !((((\reg1) == A0) && ((\reg2) == A1)) || \
+ (((\reg1) == A1) && ((\reg2) == A0)))
+ .error "\err"
+ .endif
+ .endm
+
+ // A register pair required to be the same register
+
+ .macro ..same, dest, src
+ .if ((\dest) != (\src))
+ .error "PGAS requires the src and dest register of ADDS/SUBS to be explicit and identical"
+ .endif
+ .endm
+
+ // A "Data" register
+
+ .macro ..data, reg:req, err="Expecting a 'Data' register"
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .error "\err"
+ .endif
+ .endif
+ .endm
+
+ // An "Address" register
+
+ .macro ..address, reg:req, err="Expecting an 'Address' register"
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .error "\err"
+ .endif
+ .endif
+ .endm
+
+ // A "Pervasive Chiplet ID" register
+
+ .macro ..pervasive_chiplet_id, reg:req, err="Expecting a 'Pervasive Chiplet ID' register"
+ .if (\reg != P0)
+ .if (\reg != P1)
+ .error "\err"
+ .endif
+ .endif
+ .endm
+
+ // A "Branch Compare Data" register
+
+ .macro ..branch_compare_data, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != CTR)
+ .error "Expecting a 'Branch Compare Data' register"
+ .endif
+ .endif
+ .endif
+ .endm
+
+ // An "LS Destination" register; Also the set for ADDS/SUBS
+
+ .macro ..ls_destination, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .if (\reg != P0)
+ .if (\reg != P1)
+ .if (\reg != CTR)
+ .error "Expecting an 'LS Destination' register"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endm
+
+ // An "LI Destination" register
+
+ .macro ..li_destination, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .if (\reg != CTR)
+ .error "Expecting an 'LI Destination' register"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endm
+
+ // An "LIA Destination" register
+
+ .macro ..lia_destination, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .if (\reg != TBAR)
+ .error "Expecting an 'LIA Destination' register"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endm
+
+ // An "MR Source" register
+
+ .macro ..mr_source, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .if (\reg != P0)
+ .if (\reg != P1)
+ .if (\reg != CTR)
+ .if (\reg != PC)
+ .if (\reg != ETR)
+ .if (\reg != SPRG0)
+ .if (\reg != IFR)
+ .if (\reg != EMR)
+ .error "Expecting an 'MR Source' register"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endm
+
+ // An "MR Destination" register
+
+ .macro ..mr_destination, reg
+ .if (\reg != D0)
+ .if (\reg != D1)
+ .if (\reg != A0)
+ .if (\reg != A1)
+ .if (\reg != P0)
+ .if (\reg != P1)
+ .if (\reg != CTR)
+ .if (\reg != PC)
+ .if (\reg != ETR)
+ .if (\reg != SPRG0)
+ .if (\reg != EMR)
+ .error "Expecting an 'MR Destination' register"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // PORE address spaces
+ //////////////////////////////////////////////////////////////////////
+
+ // The ..set_default_space pseudo-op defines the default address
+ // space. It must be defined in order to use BRAA, BRAIA, BSR and
+ // CMPIBSR. Pseudo-ops are provided to set the default space of the
+ // program. Note that code assembled for PNOR will also work in the
+ // OCI space in the Sleep/Winkle engine.
+
+ .macro ..set_default_space, s
+ ..check_u16 (\s)
+ .set _PGAS_DEFAULT_SPACE, (\s)
+ .endm
+
+ .macro ..check_default_space
+ .if (_PGAS_DEFAULT_SPACE == PORE_SPACE_UNDEFINED)
+ .error "The PGAS default address space has not been defined"
+ .endif
+ .endm
+
+ ..set_default_space PORE_SPACE_UNDEFINED
+
+ .macro .oci
+ ..set_default_space PORE_SPACE_OCI
+ .endm
+
+ .macro .pnor
+ ..set_default_space PORE_SPACE_PNOR
+ .endm
+
+ .macro .seeprom
+ ..set_default_space PORE_SPACE_SEEPROM
+ .endm
+
+ .macro .otprom
+ ..set_default_space PORE_SPACE_OTPROM
+ .endm
+
+ .macro .pibmem
+ ..set_default_space PORE_SPACE_PIBMEM
+#ifndef PGAS_PPC
+ .pibmem_port (PORE_SPACE_PIBMEM & 0xf)
+#else
+ // NB: PGAS_PPC does not support relocatable PIBMEM addressing
+#endif
+ .endm
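+
+ // Usage sketch (illustrative; the label names are invented):
+ //
+ //     .oci
+ //     braa my_handler    // becomes braia PORE_SPACE_OCI, my_handler
+ //     la   A0, my_table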
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Address-Generation Pseudo Ops
+ //////////////////////////////////////////////////////////////////////
+
+ // .QUADA, .QUADIA
+
+ .macro .quada, offset:req
+ ..check_default_space
+ .long _PGAS_DEFAULT_SPACE
+ .long (\offset)
+ .endm
+
+ .macro .quadia, space:req, offset:req
+ ..check_u16 (\space)
+ .long (\space)
+ .long (\offset)
+ .endm
+
+ //////////////////////////////////////////////////////////////////////
+ // Bug workarounds
+ //////////////////////////////////////////////////////////////////////
+
+#ifndef IGNORE_HW274735
+
+ // HW274735 documents that BC and BS are broken for the PORE-GPE0/1
+ // pair. This bug is unfixed in POWER8, and by default we require BSI
+ // and BCI to be implemented as macros on all engines. For
+ // compatibility we continue to require that dx == D0.
+
+ .macro bsi, dx:req, offset:req, base:req, imm:req
+ ..d0 (\dx)
+ ld D0, (\offset), (\base)
+ ori D0, D0, (\imm)
+ std D0, (\offset), (\base)
+ .endm
+
+ .macro bci, dx:req, offset:req, base:req, imm:req
+ ..d0 (\dx)
+ ldandi D0, (\offset), (\base), ~(\imm)
+ std D0, (\offset), (\base)
+ .endm
+
+#endif // IGNORE_HW274735
+
+ //////////////////////////////////////////////////////////////////////
+ // "A"- and "IA"-form Instructions
+ //////////////////////////////////////////////////////////////////////
+
+ // BRAA (Branch Address) is a 'long branch' to an address in the
+ // default memory space.
+
+ .macro braa, offset:req
+ braia _PGAS_DEFAULT_SPACE, (\offset)
+ .endm
+
+ // LA (Load Address) loads the full address of an address in the
+ // default memory space.
+
+ .macro la, dest:req, offset:req
+ lia (\dest), _PGAS_DEFAULT_SPACE, (\offset)
+ .endm
+
+ // STA (Store Address) stores the full address of an address in the
+ // default memory space.
+
+ .macro sta, mem_offset:req, base:req, addr_offset:req
+ stia (\mem_offset), (\base), _PGAS_DEFAULT_SPACE, (\addr_offset)
+ .endm
+
+ // BSRIA is a subroutine branch into another memory space. This has to
+ // be emulated by a local subroutine branch and a BRAIA.
+
+ .macro bsria, space:req, offset:req
+ bsr 27742f
+ bra 27743f
+27742:
+ braia (\space), (\offset)
+27743:
+ .endm
+
+
+////////////////////////////////////////////////////////////////////////////
+// Extended Mnemonics, Macros and Special Cases
+////////////////////////////////////////////////////////////////////////////
+
+ //////////////////////////////////////////////////////////////////////
+ // TFB<c> - Test flags and branch conditionally
+ //////////////////////////////////////////////////////////////////////
+
+ .macro ..tfb, dest, target, flags
+ ..data (\dest)
+ mr (\dest), IFR
+ andi (\dest), (\dest), (\flags)
+ branz (\dest), (\target)
+ .endm
+
+ .macro ..tfbn dest, target, flags
+ ..data (\dest)
+ mr (\dest), IFR
+ andi (\dest), (\dest), (\flags)
+ braz (\dest), (\target)
+ .endm
+
+ .macro tfbcs, dest:req, target:req
+ ..tfb (\dest), (\target), CC_C
+ .endm
+
+ .macro tfbcc, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_C
+ .endm
+
+ .macro tfbvs, dest:req, target:req
+ ..tfb (\dest), (\target), CC_V
+ .endm
+
+ .macro tfbvc, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_V
+ .endm
+
+ .macro tfbns, dest:req, target:req
+ ..tfb (\dest), (\target), CC_N
+ .endm
+
+ .macro tfbnc, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_N
+ .endm
+
+ .macro tfbeq, dest:req, target:req
+ ..tfb (\dest), (\target), CC_Z
+ .endm
+
+ .macro tfbne, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_Z
+ .endm
+
+ .macro tfbult, dest:req, target:req
+ ..tfb (\dest), (\target), CC_ULT
+ .endm
+
+ .macro tfbule, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_UGT
+ .endm
+
+ .macro tfbuge, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_ULT
+ .endm
+
+ .macro tfbugt, dest:req, target:req
+ ..tfb (\dest), (\target), CC_UGT
+ .endm
+
+ .macro tfbslt, dest:req, target:req
+ ..tfb (\dest), (\target), CC_SLT
+ .endm
+
+ .macro tfbsle, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_SGT
+ .endm
+
+ .macro tfbsge, dest:req, target:req
+ ..tfbn (\dest), (\target), CC_SLT
+ .endm
+
+ .macro tfbsgt, dest:req, target:req
+ ..tfb (\dest), (\target), CC_SGT
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // TEB[N]<eng> - Test Engine and branch if [not] engine.
+ //////////////////////////////////////////////////////////////////////
+ //
+ // All but GPE0 use a 1-hot code.
+
+ .macro tebgpe0, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), 0xf
+ braz (\dest), (\target)
+ .endm
+
+ .macro tebgpe1, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_GPE1
+ branz (\dest), (\target)
+ .endm
+
+ .macro tebslw, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_SLW
+ branz (\dest), (\target)
+ .endm
+
+ .macro tebsbe, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_SBE
+ branz (\dest), (\target)
+ .endm
+
+
+ .macro tebngpe0, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), 0xf
+ branz (\dest), (\target)
+ .endm
+
+ .macro tebngpe1, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_GPE1
+ braz (\dest), (\target)
+ .endm
+
+ .macro tebnslw, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_SLW
+ braz (\dest), (\target)
+ .endm
+
+ .macro tebnsbe, dest:req, target:req
+ mr (\dest), IFR
+ andi (\dest), (\dest), PORE_ID_SBE
+ braz (\dest), (\target)
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // EXTRPRC - Extract and right-justify the PIB/PCB return code
+ // TPRCB[N]Z - Test PIB return code and branch if [not] zero
+ // TPRCBGT - Test PIB return code and branch if greater-than
+ // TPRCBLE - Test PIB return code and branch if less-than or equal
+ //////////////////////////////////////////////////////////////////////
+ //
+ // To support cases where PORE code expects or must explicitly handle
+ // non-0 PIB return codes, the PIB return code and parity indication
+ // are stored in bits 32 (parity) and 33-35 (return code) of the IFR.
+ // These macros extract the four PIB/PCB status bits from the IFR and
+ // right-justify them into the data register provided. For EXTRPRC
+ // that is the total function of the macro. The TPRCB[N]Z macros
+ // provide a simple non-destructive test and branch for zero (success)
+ // and non-zero (potential problem) codes after the extraction.
+ //
+ // In complex error handling scenarios one would typically compare the
+ // PIB return code against an upper-bound, e.g., the offline response
+ // (0x2), and then take further action. If the parity error bit is set
+ // then this would produce an aggregate "return code" higher than any
+ // that one would typically want to ignore. The TPRCBGT/TPRCBLE macros
+ // provide this function; however the test destroys the extracted
+ // return code, so if further analysis is required the code will
+ // need to be extracted again.
+ //////////////////////////////////////////////////////////////////////
+
+ .macro extrprc, dest:req
+ ..data (\dest)
+ mr (\dest), IFR
+ extrdi (\dest), (\dest), 4, 32
+ .endm
+
+ .macro tprcbz, dest:req, target:req
+ extrprc (\dest)
+ braz (\dest), (\target)
+ .endm
+
+ .macro tprcbnz, dest:req, target:req
+ extrprc (\dest)
+ branz (\dest), (\target)
+ .endm
+
+ .macro tprcbgt, dest:req, target:req, bound:req
+ extrprc (\dest)
+ subs (\dest), (\dest), (\bound)
+ tfbugt (\dest), (\target)
+ .endm
+
+ .macro tprcble, dest:req, target:req, bound:req
+ extrprc (\dest)
+ subs (\dest), (\dest), (\bound)
+ tfbule (\dest), (\target)
+ .endm
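+
+ // Usage sketch (illustrative; the label name is invented): branch to a
+ // handler on any non-0 PIB return code, leaving the loaded data in D1:
+ //
+ //     ld      D1, 0, A0
+ //     tprcbnz D0, pib_error    // D0 receives the extracted rc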
+
+ //////////////////////////////////////////////////////////////////////
+ // LPCS - Load Pervasive Chiplet from Scom address
+ //////////////////////////////////////////////////////////////////////
+
+ .macro lpcs, dest:req, scom:req
+ ..pervasive_chiplet_id (\dest)
+ ..check_scom (\scom)
+ ls (\dest), (((\scom) >> 24) & 0x7f)
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // Shift/Mask extended mnemonics
+ //////////////////////////////////////////////////////////////////////
+
+ // All of the 'dot-dot' macros assume that error and identity
+ // checking has been done on the arguments already.
+
+ // The initial register-register rotate. If the incoming shift amount
+ // is 0 then the instruction generated is a simple MR.
+
+ .macro ..rotlrr, ra, rs, sh
+
+ .if (\sh) >= 32
+ rols (\ra), (\rs), 32
+ ..rotlr (\ra), ((\sh) - 32)
+ .elseif (\sh) >= 16
+ rols (\ra), (\rs), 16
+ ..rotlr (\ra), ((\sh) - 16)
+ .elseif (\sh) >= 8
+ rols (\ra), (\rs), 8
+ ..rotlr (\ra), ((\sh) - 8)
+ .elseif (\sh) >= 4
+ rols (\ra), (\rs), 4
+ ..rotlr (\ra), ((\sh) - 4)
+ .elseif (\sh) >= 1
+ rols (\ra), (\rs), 1
+ ..rotlr (\ra), ((\sh) - 1)
+ .else
+ mr (\ra), (\rs)
+ .endif
+
+ .endm
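+
+ // For example (illustrative), "..rotlrr D0, D1, 13" expands to
+ // "rols D0, D1, 8; rols D0, D0, 4; rols D0, D0, 1", decomposing the
+ // shift into the supported rotate lengths 32/16/8/4/1.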
+
+
+ // Subsequent rotation of the same register. The SH should never be 0
+ // here.
+
+ .macro ..rotlr, ra, sh
+
+ .if (\sh) >= 32
+ rols (\ra), (\ra), 32
+ ..rotlr (\ra), ((\sh) - 32)
+ .elseif (\sh) >= 16
+ rols (\ra), (\ra), 16
+ ..rotlr (\ra), ((\sh) - 16)
+ .elseif (\sh) >= 8
+ rols (\ra), (\ra), 8
+ ..rotlr (\ra), ((\sh) - 8)
+ .elseif (\sh) >= 4
+ rols (\ra), (\ra), 4
+ ..rotlr (\ra), ((\sh) - 4)
+ .elseif (\sh) >= 1
+ rols (\ra), (\ra), 1
+ ..rotlr (\ra), ((\sh) - 1)
+
+ .endif
+
+ .endm
+
+
+ // RLDINM RA, RS, SH, MB, ME
+ //
+ // Defined as if there were an equivalent PowerPC instruction. The
+ // 'word' forms of the PowerPC instructions and extended mnemonics are
+ // undefined in order to catch programming typos.
+
+ .undefppc rlwinm, extrwi, rotlwi, rotrwi
+ .undefppc slwi, srwi
+
+ .macro rldinm, ra:req, rs:req, sh:req, mb:req, me:req
+
+ .if ((\sh) < 0) || ((\sh) > 63)
+ .error "SH must be in the range 0..63"
+ .endif
+ .if ((\mb) < 0) || ((\mb) > 63)
+ .error "MB must be in the range 0..63"
+ .endif
+ .if ((\me) < 0) || ((\me) > 63)
+ .error "ME must be in the range 0..63"
+ .endif
+
+ .if (((\mb) == 0) && ((\me) == 63) || ((\me) == ((\mb) - 1)))
+
+ // The mask is effectively 0..63, i.e., no mask. This is a
+ // simple rotate.
+
+ ..rotlrr (\ra), (\rs), (\sh)
+
+ .else
+
+ // We need a mask step. However if SH == 0 and RA == RS we can
+ // bypass the rotate step.
+
+ .if ((\sh) != 0) || ((\ra) != (\rs))
+ ..rotlrr (\ra), (\rs), (\sh)
+ .endif
+ .if ((\mb) <= (\me))
+
+ // This is a straightforward masking operation with a
+ // single mask.
+
+ andi (\ra), (\ra), ((0xffffffffffffffff >> (\mb)) & (0xffffffffffffffff << (63 - (\me))))
+ .else
+
+ // This is a wrapped mask.
+ // It is created as 2 masks OR-ed together - 0-ME and MB-63
+
+ andi (\ra), (\ra), (((0xffffffffffffffff >> 0) & (0xffffffffffffffff << (63 - (\me)))) | ((0xffffffffffffffff >> (\mb)) & (0xffffffffffffffff << (63 - 63))))
+ .endif
+
+ .endif
+
+ .endm
+
+ // RLDINM Extended Mnemonics
+ //
+ // Defined as if they were equivalent to PowerPC 32-bit extended
+ // mnemonics
+
+ .macro extldi, ra:req, rs:req, n:req, b:req
+ .if ((\n) < 0)
+ .error "EXTLDI requires N > 0"
+ .endif
+ rldinm (\ra), (\rs), (\b), 0, ((\n) - 1)
+ .endm
+
+ .macro extrdi, ra:req, rs:req, n:req, b:req
+ .if ((\n) < 0)
+ .error "EXTRDI requires N > 0"
+ .endif
+ rldinm (\ra), (\rs), (((\b) + (\n)) % 64), (64 - (\n)), 63
+ .endm
+
+ .macro rotldi, ra:req, rs:req, n:req
+ rldinm (\ra), (\rs), (\n), 0, 63
+ .endm
+
+
+ .macro rotrdi, ra:req, rs:req, n:req
+ rldinm (\ra), (\rs), (64 - (\n)), 0, 63
+ .endm
+
+
+ .macro sldi, ra:req, rs:req, n:req
+ rldinm (\ra), (\rs), (\n), 0, (63 - (\n))
+ .endm
+
+
+ .macro srdi, ra:req, rs:req, n:req
+ rldinm (\ra), (\rs), (64 - (\n)), (\n), 63
+ .endm
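+
+ // For example (illustrative), "srdi D0, D0, 8" assembles as
+ // "rldinm D0, D0, 56, 8, 63": a left rotate by 56 (= right rotate by 8)
+ // followed by masking off the upper 8 bits.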
+
+
+ // RLDIMI RA, RS, SH, MB, ME
+ //
+ // Defined as if there were an equivalent PowerPC instruction. The
+ // 'word' forms of the PowerPC instructions and extended mnemonics are
+ // undefined in order to catch programming typos.
+ //
+ // Note that unlike the PowerPC instructions, here RLDIMI must destroy
+ // RS by masking and shifting it, and RA and RS may not be the same
+ // register.
+
+ .undefppc rlwimi, inslwi, insrwi
+
+ .macro rldimi, ra:req, rs:req, sh:req, mb:req, me:req
+
+ ..dxdy (\ra), (\rs)
+
+ // SH error checks are done by rldinm
+
+ .if (((\mb) == 0) && ((\me) == 63) || ((\me) == ((\mb) - 1)))
+
+ // The mask is effectively 0..63, i.e., no mask. This is a
+ // simple rotate of RS into RA
+
+ rotldi (\ra), (\rs), (\sh)
+
+ .else
+
+ // Rotate RS and AND with mask
+
+ rldinm (\rs), (\rs), (\sh), (\mb), (\me)
+
+ // Mask out the significant bits of RS, clear that section of
+ // RA, and logical OR RS into RA
+
+ .if ((\mb) <= (\me))
+
+ // This is a straightforward masking operation with a
+ // single mask.
+
+ andi (\ra), (\ra), \
+ (~((0xffffffffffffffff >> (\mb)) & (0xffffffffffffffff << (63 - (\me)))))
+ .else
+
+ // This is a wrapped mask.
+ // It is created as 2 masks OR-ed together - 0-ME and MB-63
+
+ andi (\ra), (\ra), \
+ (~(((0xffffffffffffffff >> 0) & (0xffffffffffffffff << (63 - (\me)))) | \
+ ((0xffffffffffffffff >> (\mb)) & (0xffffffffffffffff << (63 - 63)))))
+ .endif
+
+ or (\ra), D0, D1
+
+ .endif
+
+ .endm
+
+ // RLDIMI Extended Mnemonics
+ //
+ // Defined as if they were equivalent to PowerPC 32-bit extended
+ // mnemonics
+
+ .macro insldi, ra:req, rs:req, n:req, b:req
+ .if ((\n) < 0)
+ .error "INSLDI requires N > 0"
+ .endif
+ rldimi (\ra), (\rs), (64 - (\b)), (\b), ((\b) + (\n) - 1)
+ .endm
+
+ .macro insrdi, ra:req, rs:req, n:req, b:req
+ .if ((\n) < 0)
+ .error "INSRDI requires N > 0"
+ .endif
+ rldimi (\ra), (\rs), (64 - (\b) - (\n)), (\b), ((\b) + (\n) - 1)
+ .endm
+
+
+ //////////////////////////////////////////////////////////////////////
+ // .HOOK
+ //////////////////////////////////////////////////////////////////////
+
+ // The PoreVe (PORE Virtual Environment) is a PORE simulation
+ // environment that allows the programmer to embed C/C++ code into the
+ // PORE assembler source code, and arranges for the C/C++ code to be
+ // executed in-line with the PORE assembly code. Instances of the
+ // .hook macro are inserted into the assembler input by the
+ // hook_extractor script, to mark the locations where hooks are
+ // present. The hook reference is a string that combines the source
+ // file name with an index number to uniquely identify the hook.
+ //
+ // .hook <file name>_<sequence number>
+ //
+ // The .hook macro marks the location of each hook in the relocatable
+ // binaries with special symbols. The symbol name includes the hook
+ // reference, which is used to locate the hook in the HookManager
+ // symbol table. Because hooks can be defined in macros, a hook that
+ // appears once in a source file may appear multiple times in the
+ // final binary. For this reason each hook must also be tagged with a
+ // unique index number to avoid symbol name collisions. The
+ // complexity of the .hook macro is due to the necessity to decode a
+ // dynamic symbol value (_PGAS_HOOK_INDEX) into its binary string form
+ // to create the unique symbol name. The final hook symbol has the
+ // form:
+ //
+ // __hook__<unique>_<reference>
+ //
+ // where <unique> is a binary string. It is then straightforward to
+ // locate these symbols in the 'nm' output of the final link and
+ // create a map of final addresses to the hook routine to call (the
+ // <reference>) before executing the instruction at that address.
+ //
+ // Note: The maximum nesting depth of the recursive ..hook_helper
+ // macro is log2(index), and the assembler supports nesting of at
+ // least 32 which is much more than sufficient.
+
+ .set _PGAS_HOOK_INDEX, 0
+
+ .macro .hook, reference:req
+ .set _PGAS_HOOK_INDEX, (_PGAS_HOOK_INDEX + 1)
+ ..hook_helper _PGAS_HOOK_INDEX, "", \reference
+ .endm
+
+ .macro ..hook_helper, index, unique, reference
+ .ifeq \index
+ __hook__\unique\()_\reference\():
+ .elseif (\index % 2)
+ ..hook_helper (\index / 2), 1\unique, \reference
+ .else
+ ..hook_helper (\index / 2), 0\unique, \reference
+ .endif
+ .endm
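+
+ // For example (illustrative): the third .hook assembled, with reference
+ // "sample_3", sees _PGAS_HOOK_INDEX == 3 (binary 11) and emits the
+ // symbol __hook__11_sample_3.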
+
+
+////////////////////////////////////////////////////////////////////////////
+// Help for Conversion from Old to New PGAS syntax
+////////////////////////////////////////////////////////////////////////////
+
+ .macro loadp, arg:vararg
+ .error "PGAS now implements 'lpcs' rather then 'loadp'"
+ .endm
+
+ .macro loadx, arg:vararg
+ .error "PGAS now implements 'la' rather than 'loadx'"
+ .endm
+
+#endif // __ASSEMBLER__
+
+#ifdef PGAS_PPC
+#include "pgas_ppc.h"
+#endif
+
+#endif // __PGAS_H__
diff --git a/libpore/pore_inline.h b/libpore/pore_inline.h
new file mode 100644
index 0000000..f74aa6f
--- /dev/null
+++ b/libpore/pore_inline.h
@@ -0,0 +1,881 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline.h $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __PORE_INLINE_H__
+#define __PORE_INLINE_H__
+
+// $Id: pore_inline.h,v 1.20 2013/12/11 00:11:13 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/pore_inline.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+// ** WARNING : This file is maintained as part of the OCC firmware. Do **
+// ** not edit this file in the PMX area or the hardware procedure area **
+// ** as any changes will be lost. **
+
+/// \file pore_inline.h
+/// \brief Inline assembler for PORE code
+///
+/// Note that this file defines several short macro symbols for register names
+/// and other mnemonics used by inline assembly. For this reason it would
+/// probably be best to only include this header when it was absolutely
+/// necessary, i.e., only in C files that explicitly use inline assembly and
+/// disassembly.
+
+#ifndef PPC_HYP
+#include <ctype.h>
+#include <stddef.h>
+#include <stdint.h>
+#endif // PPC_HYP
+#include "pgas.h"
+
+#if( defined(__cplusplus) && !defined(PLIC_MODULE) )
+extern "C" {
+#endif
+#if 0
+} /* So __cplusplus doesn't mess w/auto-indent */
+#endif
+
+
+#ifndef __ASSEMBLER__
+
+// PHYP tools do not support 'static' functions and variables as it interferes
+// with their concurrent patch methodology. So when compiling for PHYP the
+// PORE instruction "macros" are simply declared "inline". This also extends
+// into the implementation C files - so under PHYP all previously local static
+// functions will now be global functions. We retain 'static' to reduce code
+// size and improve abstraction for OCC applications.
+
+#ifdef PPC_HYP
+#define PORE_STATIC
+#include <p8_pore_api_custom.h>
+#else
+#define PORE_STATIC static
+#endif
+
+/// Error code strings from the PORE inline assembler/disassembler
+///
+/// The PoreInlineContext object stores error codes that occur during
+/// assembly as small integers. The '0' code indicates success. This is a
+/// table of strings that describe the codes. It will be instantiated in
+/// pore_inline.c
+
+extern const char *pore_inline_error_strings[];
+
+#ifdef __PORE_INLINE_ASSEMBLER_C__
+const char *pore_inline_error_strings[] = {
+ "No error",
+ "The inline assembler memory is full, or disassembly has reached the end of the memory area",
+ "The instruction requires an ImD24 operand",
+ "The LC is not aligned or the instruction requires an aligned operand",
+ "The branch target is unreachable (too distant)",
+ "A register operand is illegal for the given instruction",
+ "The instruction form requires a signed 16-bit immediate",
+ "Valid rotate lengths are 1, 4, 8, 16 and 32",
+ "The instruction requires a 20-bit signed immediate",
+ "The instruction requires a 24-bit unsigned immediate",
+ "A parameter to pore_inline_context_create() is invalid",
+ "The instruction form requires an unsigned 22-bit immediate",
+ "This error is due to a bug in the PORE inline assembler (Please report)",
+ "The 'source' label for pore_inline_branch_fixup() is illegal",
+ "The 'source' instruction for pore_inline_branch_fixup() is not a branch",
+ "The disassembler does not recognize the instruction as a PORE opcode",
+ "Instruction parity error during disassembly",
+ "The string form of the disassembly is too long to represent (Please report)`",
+ "Use HALT instead of WAIT 0 if the intention is to halt.",
+ "A putative SCOM address is illegal (has non-0 bits where 0s are expected)."
+};
+#endif /* __PORE_INLINE_ASSEMBLER_C__ */
+
+#endif /* __ASSEMBLER__ */
+
+#define PORE_INLINE_SUCCESS 0
+#define PORE_INLINE_NO_MEMORY 1
+#define PORE_INLINE_IMD24_ERROR 2
+#define PORE_INLINE_ALIGNMENT_ERROR 3
+#define PORE_INLINE_UNREACHABLE_TARGET 4
+#define PORE_INLINE_ILLEGAL_REGISTER 5
+#define PORE_INLINE_INT16_REQUIRED 6
+#define PORE_INLINE_ILLEGAL_ROTATE 7
+#define PORE_INLINE_INT20_REQUIRED 8
+#define PORE_INLINE_UINT24_REQUIRED 9
+#define PORE_INLINE_INVALID_PARAMETER 10
+#define PORE_INLINE_UINT22_REQUIRED 11
+#define PORE_INLINE_BUG 12
+#define PORE_INLINE_ILLEGAL_SOURCE_LC 13
+#define PORE_INLINE_NOT_A_BRANCH 14
+#define PORE_INLINE_UNKNOWN_OPCODE 15
+#define PORE_INLINE_PARITY_ERROR 16
+#define PORE_INLINE_DISASSEMBLY_OVERFLOW 17
+#define PORE_INLINE_USE_HALT 18
+#define PORE_INLINE_ILLEGAL_SCOM_ADDRESS 19
+
+
+/// Register name strings for the PORE inline assembler/disassembler
+
+extern const char *pore_inline_register_strings[16];
+
+// C++ requires that these arrays of strings be declared 'const' to avoid
+// warnings. But then you get warnings when the strings get stored into
+// non-const variables. The solution is to rename these arrays inside the
+// disassembler. If anyone has a better solution please let me know - Bishop
+
+#ifdef __PORE_INLINE_ASSEMBLER_C__
+const char* pore_inline_register_strings[16] = {
+ "P0", "P1", "A0", "A1", "CTR", "D0", "D1", "EMR",
+ "?", "ETR", "SPRG0", "?", "?", "?", "PC", "IFR"
+};
+#endif /* __PORE_INLINE_ASSEMBLER_C__ */
+
+
+// Shorthand forms of constants defined in pgas.h, defined for consistency
+// using the assembler-supported names. These constants are defined as an
+// enum to avoid name conflicts with some firmware symbols when the PORE
+// inline facility is used to create Host Boot procedures.
+
+enum {
+
+ // Shorthand register mnemonics, defined as an enum to avoid name clashes.
+
+ P0 = PORE_REGISTER_PRV_BASE_ADDR0,
+ P1 = PORE_REGISTER_PRV_BASE_ADDR1,
+ A0 = PORE_REGISTER_OCI_BASE_ADDR0,
+ A1 = PORE_REGISTER_OCI_BASE_ADDR1,
+ CTR = PORE_REGISTER_SCRATCH0,
+ D0 = PORE_REGISTER_SCRATCH1,
+ D1 = PORE_REGISTER_SCRATCH2,
+ EMR = PORE_REGISTER_ERROR_MASK,
+ ETR = PORE_REGISTER_EXE_TRIGGER,
+ SPRG0 = PORE_REGISTER_DATA0,
+ PC = PORE_REGISTER_PC,
+ IFR = PORE_REGISTER_IBUF_ID,
+
+ // PgP IBUF_ID values
+
+ PORE_GPE0 = PORE_ID_GPE0,
+ PORE_GPE1 = PORE_ID_GPE1,
+ PORE_SLW = PORE_ID_SLW,
+ PORE_SBE = PORE_ID_SBE,
+
+ // Condition Codes
+
+ CC_UGT = PORE_CC_UGT,
+ CC_ULT = PORE_CC_ULT,
+ CC_SGT = PORE_CC_SGT,
+ CC_SLT = PORE_CC_SLT,
+ CC_C = PORE_CC_C,
+ CC_V = PORE_CC_V,
+ CC_N = PORE_CC_N,
+ CC_Z = PORE_CC_Z,
+};
+
+// Pseudo-opcodes for LD/LDANDI/STD
+
+#define PORE_INLINE_PSEUDO_LD 0
+#define PORE_INLINE_PSEUDO_LDANDI 1
+#define PORE_INLINE_PSEUDO_STD 2
+
+
+// Private version of _BIG_ENDIAN
+
+#ifndef _BIG_ENDIAN
+#define PORE_BIG_ENDIAN 0
+#else
+#define PORE_BIG_ENDIAN _BIG_ENDIAN
+#endif
+
+
+/// Maximum size of disassembly strings
+///
+/// This is currently sufficient for PORE_INLINE_LISTING_MODE. We don't want
+/// to make this too long since the PoreInlineDisassembly object may be on the
+/// stack in embedded applications.
+#define PORE_INLINE_DISASSEMBLER_STRING_SIZE 128
+
+
+/// Generate PORE instruction parity
+///
+/// This flag is an option to pore_inline_context_create(). If set, PORE
+/// inline assembly sets the instruction parity bit for each assembled
+/// instruction; otherwise the instruction parity bit is always 0.
+#define PORE_INLINE_GENERATE_PARITY 0x01
+
+/// Check PORE instruction parity
+///
+/// This flag is an option to pore_inline_context_create(). If set, PORE
+/// inline disassembly checks the instruction parity bit for each disassembled
+/// instruction, failing with PORE_INLINE_PARITY_ERROR if the parity is not
+/// correct. Otherwise the instruction parity bit is ignored during
+/// disassembly.
+#define PORE_INLINE_CHECK_PARITY 0x02
+
+/// Disassemble in listing mode
+///
+/// This flag is an option to pore_inline_context_create(). If set, then
+/// generate disassembly strings in the form of a listing that contains
+/// location counters and encoded instructions as well as their disassembly.
+/// By default the disassembly strings do not contain this information and can
+/// be fed back in as source code to a PORE assembler.
+#define PORE_INLINE_LISTING_MODE 0x04
+
+/// Disassemble in data mode
+///
+/// This flag is an option to pore_inline_context_create(). If set, then
+/// generate disassembly assuming that the context contains data rather than
+/// text. Normally data is disassembled as .long directives; however, if the
+/// context is unaligned or of an odd length then .byte directives may be used
+/// as well. This option can be used in conjunction with
+/// PORE_INLINE_LISTING_MODE and PORE_INLINE_8_BYTE_DATA.
+///
+/// Note: An intelligent application can switch between the default text
+/// disassembly and data disassembly by manipulating the \a options field of
+/// the PoreInlineContext between calls of pore_inline_disassemble().
+#define PORE_INLINE_DISASSEMBLE_DATA 0x08
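+
+// A minimal sketch of the option switching mentioned above, assuming 'ctx'
+// and 'dis' are an initialized PoreInlineContext and PoreInlineDisassembly:
+//
+//     ctx.options |= PORE_INLINE_DISASSEMBLE_DATA;  /* next item is data */
+//     pore_inline_disassemble(&ctx, &dis);
+//     ctx.options &= ~PORE_INLINE_DISASSEMBLE_DATA; /* back to text */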
+
+/// Disassemble data in 8-byte format
+///
+/// This flag is an option to pore_inline_context_create(). If set, and if
+/// PORE_INLINE_DISASSEMBLE_DATA is also set, then data disassembly generates
+/// 8-byte values rather than the default 4-byte values. Normally data is
+/// disassembled as .quad directives under this option; however, if the context
+/// is unaligned or of an odd length then .long and .byte directives may be
+/// used as well. This option can be used in conjunction with
+/// PORE_INLINE_LISTING_MODE.
+///
+/// Note: An intelligent application can switch between the default text
+/// disassembly and data disassembly by manipulating the \a options field of
+/// the PoreInlineContext between calls of pore_inline_disassemble().
+#define PORE_INLINE_8_BYTE_DATA 0x10
+
+/// Disassemble unrecognized opcodes as 4-byte data
+///
+/// This flag is an option to pore_inline_context_create(). If set, then
+/// any putative instruction with an unrecognized opcode will be silently
+/// disassembled as 4-byte data.
+///
+/// This option was added to allow error-free disassembly of
+/// non-parity-protected PORE text sections that contain 0x00000000 alignment
+/// padding, and is not guaranteed to produce correct or consistent results in
+/// any other case.
+#define PORE_INLINE_DISASSEMBLE_UNKNOWN 0x20
+
+
+#ifndef __ASSEMBLER__
+
+/// The type of location counters for the PORE inline assembler
+
+typedef uint32_t PoreInlineLocation;
+
+/// PORE inline assembler context
+///
+/// See the documentation page \ref pore_inline_assembler and the function
+/// pore_inline_context_create() for further details.
+
+typedef struct {
+
+ /// The memory area to receive the inline assembly
+ ///
+ /// This field is never modified, allowing the *reset* APIs to function.
+ ///
+ /// Note: C++ does not allow arithmetic on void* objects, so we use the
+ /// Linux convention of storing memory addresses as type 'unsigned long'.
+ unsigned long memory;
+
+ /// The original size of the memory area to receive the inline assembly
+ ///
+ /// This field is never modified, allowing the *reset* APIs to function.
+ size_t size;
+
+ /// The original Location Counter (associated with \a memory)
+ ///
+ /// This field is never modified, allowing the *reset* APIs to function.
+ PoreInlineLocation original_lc;
+
+ /// The memory address associated with the current LC
+ ///
+ /// Note: C++ does not allow arithmetic on void* objects, so we use the
+ /// Linux convention of storing memory addresses as type 'unsigned long'.
+ unsigned long lc_address;
+
+ /// The remaining size of the memory area to receive the inline assembly
+ size_t remaining;
+
+ /// The bytewise Location Counter of the assembled code
+ PoreInlineLocation lc;
+
+ /// Inline assembly options
+ ///
+ /// This field is never modified, allowing the *reset* APIs to function.
+ int options;
+
+ /// The last error code generated by the inline assembler
+ int error;
+
+} PoreInlineContext;
+
+
+/// PORE inline disassembler result
+///
+/// This object holds the disassembly produced by pore_inline_disassemble().
+/// See documentation for that function for complete details.
+
+typedef struct {
+
+ /// The context as it existed when the instruction was assembled
+ ///
+ /// Disassembling an instruction modifies the context provided to
+ /// pore_inline_disassemble() to point to the next instruction. This
+ /// structure stores a copy of the context at the initial call of
+ /// pore_inline_disassemble(), that is, the context in effect when the
+ /// disassembled instruction was assembled.
+ PoreInlineContext ctx;
+
+ /// The first 32 bits of every instruction
+ uint32_t instruction;
+
+ /// The opcode; bits 0..6 of the instruction
+ int opcode;
+
+ /// A flag: if set, the opcode is for a 12-byte instruction
+ int long_instruction;
+
+ /// The parity bit; bit 7 of the instruction
+ int parity;
+
+ /// The register specifier at bits 8..11 of the instruction
+ ///
+ /// This register is sometimes called the source, sometimes the target,
+ /// depending on the opcode.
+ int r0;
+
+ /// The register specifier at bits 12..15 of the instruction
+ ///
+ /// This register is always called the 'source' but is named generically
+ /// here since sometimes the specifier at bits 8..11 is also called a
+ /// 'source'.
+ int r1;
+
+ /// 'ImD16' is the signed 16-bit immediate for short immediate adds and
+ /// subtracts. For the rotate instruction this field also contains the
+ /// rotate count, which is either 1, 4, 8, 16 or 32.
+ int16_t imd16;
+
+ /// 'ImD20' is the 20-bit signed immediate for the LOAD20 instruction
+ int32_t imd20;
+
+ /// 'ImD24' is the 24-bit unsigned immediate for the WAIT instruction
+ uint32_t imd24;
+
+ /// 'ImD64' is the 64-bit immediate for data immediates and BRAI. This
+ /// field is only set for 3-word instructions.
+ uint64_t imd64;
+
+ /// 'ImPCO20' is a signed, 20-bit word offset for branch instructions
+ int32_t impco20;
+
+ /// 'ImPCO24' is a signed, 24-bit word offset for branch instructions
+ int32_t impco24;
+
+ /// For imA24 opcodes, this indicates memory/pib (1/0) addressing.
+ int memory_space;
+
+ /// This is the base register specifier - either a memory (OCI) base
+ /// register or a pervasive base register - for Read/Write operations.
+ /// Note that this is a PORE register index, not simply 0/1.
+ int base_register;
+
+ /// This is the 22-bit unsigned offset for memory (OCI) addressing. This
+ /// offset is added to a memory base register (A0/A1) to form the
+ /// final 32-bit address.
+ uint32_t memory_offset;
+
+ /// This field contains the port number and local address portions of the
+ /// PIB/PCB address for load/store operations that target the PIB/PCB.
+ /// Note that bits 0..11 will always be 0 in this address. Bits 1..7 (the
+ /// multicast bit and chiplet id) are sourced from the associated
+ /// pervasive base register when the instruction executes.
+ uint32_t pib_offset;
+
+ /// The update bit of the SCAND instruction
+ int update;
+
+ /// The capture bit of the SCAND instruction
+ int capture;
+
+ /// The scan length from a SCAND instruction
+ int scan_length;
+
+ /// The scan select from a SCAND instruction
+ uint32_t scan_select;
+
+ /// The address offset from a SCAND instruction
+ uint32_t scan_offset;
+
+ /// The string form of the disassembly.
+ ///
+ /// The disassembly string is \e not terminated by a newline. In listing
+ /// mode the disassembly string \e will contain embedded newlines for long
+ /// instructions.
+ char s[PORE_INLINE_DISASSEMBLER_STRING_SIZE];
+
+ /// The data (for data disassembly)
+ ///
+ /// This is either 1, 4 or 8 bytes in host byte order.
+ uint64_t data;
+
+ /// The size of the disassembled \a data field (for data disassembly)
+ size_t data_size;
+
+ /// Was this location disassembled as an instruction (0) or as data (1)
+ int is_data;
+
+} PoreInlineDisassembly;
+
+
+// These are internal APIs - they are not needed by application code.
+
+void
+pore_inline_be32(unsigned long p, uint32_t x);
+
+void
+pore_inline_be64(unsigned long p, uint64_t x);
+
+uint32_t
+pore_inline_host32(unsigned long p);
+
+uint64_t
+pore_inline_host64(unsigned long p);
+
+int
+pore_inline_parity(uint32_t instruction, uint64_t imd64);
+
+void
+pore_inline_context_bump(PoreInlineContext *ctx, size_t bytes);
+
+int
+pore_inline_instruction1(PoreInlineContext *ctx, int opcode, uint32_t operand);
+
+int
+pore_inline_instruction3(PoreInlineContext *ctx, int opcode, uint32_t operand,
+ uint64_t imm);
+
+int
+pore_inline_bra(PoreInlineContext *ctx,
+ int opcode, PoreInlineLocation target);
+
+int
+pore_inline_brac(PoreInlineContext *ctx,
+ int opcode, int reg, PoreInlineLocation target);
+
+int
+pore_inline_cmpibra(PoreInlineContext *ctx,
+ int opcode, int reg,
+ PoreInlineLocation target, uint64_t imm);
+
+int
+pore_inline_brad(PoreInlineContext *ctx, int opcode, int reg);
+
+int
+pore_inline_ilogic(PoreInlineContext *ctx,
+ int opcode, int dest, int src, uint64_t imm);
+int
+pore_inline_alurr(PoreInlineContext *ctx,
+ int opcode, int dest, int src1, int src2);
+
+int
+pore_inline_adds(PoreInlineContext *ctx,
+ int opcode, int dest, int src, int imm);
+
+int
+pore_inline_load_store(PoreInlineContext *ctx,
+ int opcode, int src_dest, int32_t offset, int base,
+ uint64_t imm);
+
+
+// These are utility APIs that may be required by special-purpose code that
+// uses the pore_inline library.
+
+void
+pore_inline_decode_instruction(PoreInlineDisassembly* dis,
+ uint32_t instruction);
+
+void
+pore_inline_decode_imd64(PoreInlineDisassembly* dis, uint64_t imd64);
+
+
+// These are the inline PORE instructions, extended mnemonics and pseudo-ops
+// to be used by application code.
+
+/// Set a location counter variable from a context
+///
+/// This is a macro that sets the \a var (of type PoreInlineLocation) to the
+/// current location counter of the \a ctx. The macro produces an expression
+/// that evaluates to 0 so that it can be used in the logical-OR expressions
+/// used to define inline assembly sequences.
+
+#define PORE_LOCATION(ctx, var) (((var) = (ctx)->lc), 0)
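+
+// For example, a minimal sketch (assuming 'ctx' is an initialized
+// PoreInlineContext) that records a location and branches back to it:
+//
+//     PoreInlineLocation loop = 0;
+//     int error = PORE_LOCATION(&ctx, loop) ||
+//                 pore_ADDS(&ctx, D0, D0, 1) ||
+//                 pore_LOOP(&ctx, loop);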
+
+int
+pore_inline_context_create(PoreInlineContext *context,
+ void *memory,
+ size_t size,
+ PoreInlineLocation lc,
+ int options);
+
+void
+pore_inline_context_reset(PoreInlineContext *context);
+
+void
+pore_inline_context_reset_excursion(PoreInlineContext *context);
+
+void
+pore_inline_context_copy(PoreInlineContext *dest, PoreInlineContext *src);
+
+
+int
+pore_inline_branch_fixup(PoreInlineContext *ctx,
+ PoreInlineLocation source,
+ PoreInlineLocation target);
+
+
+int
+pore_inline_disassemble(PoreInlineContext *ctx, PoreInlineDisassembly *dis);
+
+
+// Native PORE instruction assembly, using PGAS opcode names and operand
+// ordering rules.
+
+// NOP, TRAP, RET
+
+PORE_STATIC inline int
+pore_NOP(PoreInlineContext *ctx)
+{
+ return pore_inline_instruction1(ctx, PGAS_OPCODE_NOP, 0);
+}
+
+
+PORE_STATIC inline int
+pore_TRAP(PoreInlineContext *ctx)
+{
+ return pore_inline_instruction1(ctx, PGAS_OPCODE_TRAP, 0);
+}
+
+
+PORE_STATIC inline int
+pore_RET(PoreInlineContext *ctx)
+{
+ return pore_inline_instruction1(ctx, PGAS_OPCODE_RET, 0);
+}
+
+
+// WAITS, HALT, HOOKI
+
+int
+pore_WAITS(PoreInlineContext *ctx, uint32_t cycles);
+
+PORE_STATIC inline int
+pore_HALT(PoreInlineContext *ctx)
+{
+ return pore_inline_instruction1(ctx, PGAS_OPCODE_WAITS, 0);
+}
+
+int
+pore_HOOKI(PoreInlineContext *ctx, uint32_t index, uint64_t imm);
+
+
+// BRA, BSR, LOOP
+
+PORE_STATIC inline int
+pore_BRA(PoreInlineContext *ctx, PoreInlineLocation target)
+{
+ return pore_inline_bra(ctx, PGAS_OPCODE_BRA, target);
+}
+
+PORE_STATIC inline int
+pore_BSR(PoreInlineContext *ctx, PoreInlineLocation target)
+{
+ return pore_inline_bra(ctx, PGAS_OPCODE_BSR, target);
+}
+
+PORE_STATIC inline int
+pore_LOOP(PoreInlineContext *ctx, PoreInlineLocation target)
+{
+ return pore_inline_bra(ctx, PGAS_OPCODE_LOOP, target);
+}
+
+
+// BRAZ, BRANZ
+
+PORE_STATIC inline int
+pore_BRAZ(PoreInlineContext *ctx, int reg, PoreInlineLocation target)
+{
+ return pore_inline_brac(ctx, PGAS_OPCODE_BRAZ, reg, target);
+}
+
+
+PORE_STATIC inline int
+pore_BRANZ(PoreInlineContext *ctx, int reg, PoreInlineLocation target)
+{
+ return pore_inline_brac(ctx, PGAS_OPCODE_BRANZ, reg, target);
+}
+
+
+// CMPIBRAEQ, CMPIBRANE, CMPIBSREQ
+
+PORE_STATIC inline int
+pore_CMPIBRAEQ(PoreInlineContext *ctx,
+ int reg, PoreInlineLocation target, uint64_t imm)
+{
+ return pore_inline_cmpibra(ctx, PGAS_OPCODE_CMPIBRAEQ, reg, target, imm);
+}
+
+
+PORE_STATIC inline int
+pore_CMPIBRANE(PoreInlineContext *ctx,
+ int reg, PoreInlineLocation target, uint64_t imm)
+{
+ return pore_inline_cmpibra(ctx, PGAS_OPCODE_CMPIBRANE, reg, target, imm);
+}
+
+
+PORE_STATIC inline int
+pore_CMPIBSREQ(PoreInlineContext *ctx,
+ int reg, PoreInlineLocation target, uint64_t imm)
+{
+ return pore_inline_cmpibra(ctx, PGAS_OPCODE_CMPIBSREQ, reg, target, imm);
+}
+
+
+// BRAD, BSRD
+
+PORE_STATIC inline int
+pore_BRAD(PoreInlineContext *ctx, int reg) {
+ return pore_inline_brad(ctx, PGAS_OPCODE_BRAD, reg);
+}
+
+PORE_STATIC inline int
+pore_BSRD(PoreInlineContext *ctx, int reg) {
+ return pore_inline_brad(ctx, PGAS_OPCODE_BSRD, reg);
+}
+
+
+// ANDI, ORI, XORI
+
+PORE_STATIC inline int
+pore_ANDI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
+{
+ return pore_inline_ilogic(ctx, PGAS_OPCODE_ANDI, dest, src, imm);
+}
+
+PORE_STATIC inline int
+pore_ORI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
+{
+ return pore_inline_ilogic(ctx, PGAS_OPCODE_ORI, dest, src, imm);
+}
+
+PORE_STATIC inline int
+pore_XORI(PoreInlineContext *ctx, int dest, int src, uint64_t imm)
+{
+ return pore_inline_ilogic(ctx, PGAS_OPCODE_XORI, dest, src, imm);
+}
+
+
+// AND, OR, XOR, ADD, SUB
+
+PORE_STATIC inline int
+pore_AND(PoreInlineContext *ctx, int dest, int src1, int src2)
+{
+ return pore_inline_alurr(ctx, PGAS_OPCODE_AND, dest, src1, src2);
+}
+
+PORE_STATIC inline int
+pore_OR(PoreInlineContext *ctx, int dest, int src1, int src2)
+{
+ return pore_inline_alurr(ctx, PGAS_OPCODE_OR, dest, src1, src2);
+}
+
+PORE_STATIC inline int
+pore_XOR(PoreInlineContext *ctx, int dest, int src1, int src2)
+{
+ return pore_inline_alurr(ctx, PGAS_OPCODE_XOR, dest, src1, src2);
+}
+
+PORE_STATIC inline int
+pore_ADD(PoreInlineContext *ctx, int dest, int src1, int src2)
+{
+ return pore_inline_alurr(ctx, PGAS_OPCODE_ADD, dest, src1, src2);
+}
+
+PORE_STATIC inline int
+pore_SUB(PoreInlineContext *ctx, int dest, int src1, int src2)
+{
+ return pore_inline_alurr(ctx, PGAS_OPCODE_SUB, dest, src1, src2);
+}
+
+
+// ADDS, SUBS
+
+PORE_STATIC inline int
+pore_ADDS(PoreInlineContext *ctx, int dest, int src, int imm)
+{
+ return pore_inline_adds(ctx, PGAS_OPCODE_ADDS, dest, src, imm);
+}
+
+PORE_STATIC inline int
+pore_SUBS(PoreInlineContext *ctx, int dest, int src, int imm)
+{
+ return pore_inline_adds(ctx, PGAS_OPCODE_SUBS, dest, src, imm);
+}
+
+
+// NEG, MR, ROLS, LS, LI
+
+int
+pore_NEG(PoreInlineContext *ctx, int dest, int src);
+
+int
+pore_MR(PoreInlineContext *ctx, int dest, int src);
+
+int
+pore_ROLS(PoreInlineContext *ctx, int dest, int src, int imm);
+
+int
+pore_LS(PoreInlineContext *ctx, int dest, int imm);
+
+int
+pore_LI(PoreInlineContext *ctx, int dest, uint64_t imm);
+
+
+// LD, LDANDI, STD, STI, BSI, BCI
+
+PORE_STATIC inline int
+pore_LD(PoreInlineContext *ctx, int dest, int32_t offset, int base)
+{
+ return
+ pore_inline_load_store(ctx,
+ PORE_INLINE_PSEUDO_LD, dest, offset, base, 0);
+}
+
+PORE_STATIC inline int
+pore_LDANDI(PoreInlineContext *ctx,
+ int dest, int32_t offset, int base, uint64_t imm)
+{
+ return
+ pore_inline_load_store(ctx,
+ PORE_INLINE_PSEUDO_LDANDI,
+ dest, offset, base, imm);
+}
+
+PORE_STATIC inline int
+pore_STD(PoreInlineContext *ctx, int src, int32_t offset, int base)
+{
+ return
+ pore_inline_load_store(ctx,
+ PORE_INLINE_PSEUDO_STD, src, offset, base, 0);
+}
+
+PORE_STATIC inline int
+pore_STI(PoreInlineContext *ctx, int32_t offset, int base, uint64_t imm)
+{
+ return
+ pore_inline_load_store(ctx,
+ PGAS_OPCODE_STI, 0, offset, base, imm);
+}
+
+
+#ifdef IGNORE_HW274735
+
+// BSI and BCI are redacted as instructions and reimplemented as "macros" due
+// to HW274735, unless specifically overridden. Note that the inline assembler
+// will allow D1 to be used as scratch here, unlike the underlying hardware
+// instruction.
+
+PORE_STATIC inline int
+pore_BSI(PoreInlineContext *ctx,
+ int src, int32_t offset, int base, uint64_t imm)
+{
+ return
+ pore_inline_load_store(ctx,
+ PGAS_OPCODE_BSI, src, offset, base, imm);
+}
+
+PORE_STATIC inline int
+pore_BCI(PoreInlineContext *ctx,
+ int src, int32_t offset, int base, uint64_t imm)
+{
+ return
+ pore_inline_load_store(ctx,
+ PGAS_OPCODE_BCI, src, offset, base, imm);
+}
+
+#else
+
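+// In this configuration pore_BSI() assembles the three-instruction sequence
+// "ld src, offset, base; ori src, src, imm; std src, offset, base", and
+// pore_BCI() assembles the analogous ldandi/std sequence with the
+// complemented immediate.
+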
+PORE_STATIC inline int
+pore_BSI(PoreInlineContext *ctx,
+ int src, int32_t offset, int base, uint64_t imm)
+{
+ return
+ ((pore_LD(ctx, src, offset, base) ||
+ pore_ORI(ctx, src, src, imm) ||
+ pore_STD(ctx, src, offset, base)) ? ctx->error : 0);
+}
+
+PORE_STATIC inline int
+pore_BCI(PoreInlineContext *ctx,
+ int src, int32_t offset, int base, uint64_t imm)
+{
+ return
+ ((pore_LDANDI(ctx, src, offset, base, ~imm) ||
+ pore_STD(ctx, src, offset, base)) ? ctx->error : 0);
+}
+
+#endif // IGNORE_HW274735
+
+
+// BRAIA
+
+int
+pore_BRAIA(PoreInlineContext *ctx,
+ uint16_t address_space, uint32_t offset);
+
+
+// SCAND
+
+int
+pore_SCAND(PoreInlineContext *ctx,
+ int update, int capture, uint16_t length,
+ uint32_t select, uint32_t offset);
+
+#endif /* __ASSEMBLER__ */
+
+#if 0
+{ /* So __cplusplus doesn't mess w/auto-indent */
+#endif
+#if( defined(__cplusplus) && !defined(PLIC_MODULE) )
+}
+#endif
+
+#endif /* __PORE_INLINE_H__ */
+
diff --git a/libpore/pore_inline_assembler.c b/libpore/pore_inline_assembler.c
new file mode 100644
index 0000000..470b2fa
--- /dev/null
+++ b/libpore/pore_inline_assembler.c
@@ -0,0 +1,1507 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/pore_inline_assembler.c $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: pore_inline_assembler.c,v 1.22 2013/12/11 00:11:14 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/pore_inline_assembler.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2013
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+
+// ** WARNING : This file is maintained as part of the OCC firmware. Do **
+// ** not edit this file in the PMX area or the hardware procedure area **
+// ** as any changes will be lost. **
+
+/// \file pore_inline_assembler.c
+/// \brief Inline PGAS assembler for PgP/Stage1 PORE
+///
+/// \page pore_inline_assembler PORE Inline Assembler and Disassembler
+///
+/// Several procedures targeting the PORE engine require inline assembly and
+/// disassembly of PORE code, that is, they require that PORE instructions be
+/// assembled/disassembled directly into/from a host memory buffer. This page
+/// describes these facilities. The APIs described here are implemented in
+/// the files pore_inline.h, pore_inline_assembler.c and
+/// pore_inline_disassembler.c. Both the inline assembler and disassembler
+/// conform to the PGAS assembly format for PORE.
+///
+/// Both inline assembly and disassembly make use of a PoreInlineContext
+/// structure. This structure represents the state of a memory area being
+/// targeted for inline assembly and disassembly. The context is initialized
+/// with the pore_inline_context_create() API, and a pointer to an instance of
+/// this structure appears as the first argument of all assembler/disassembler
+/// APIs. As assembly/disassembly progresses the PoreInlineContext keeps
+/// track of how much of the host memory area has been filled by assembled
+/// code or scanned by the disassembler.
+///
+/// Assembler/disassembler APIs are predicates that return 0 for success and a
+/// non-zero error code for failure. In the event of failure, the error code
+/// (a small integer) is also stored in the \a error field of the context
+/// structure. String forms of the error codes are also available in the
+/// global array pore_inline_error_strings[].
+///
+/// The assembler always produces PORE code in the PORE-native big-endian
+/// format. Likewise, the disassembler assumes the host memory to be
+/// disassembled contains PORE code in big-endian format.
+///
+/// \section Initialization
+///
+/// Before invoking inline assembly/disassembly APIs, an instance of a
+/// PoreInlineContext structure must be initialized using the
+/// pore_inline_context_create() API. For assembly, the context describes the
+/// host memory buffer that will contain the assembled code. For disassembly,
+/// the context describes the host memory area that contains the code to be
+/// disassembled. Full documentation is available for
+/// pore_inline_context_create(), including documentation for options that
+/// control assembly and disassembly. The implementation also provides a
+/// 'copy operator' for the context, pore_inline_context_copy().
+///
+/// An example of initializing a context for inline assembly with parity
+/// generation appears below.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// uint32_t buf[BUFSIZE];
+///
+/// rc = pore_inline_context_create(&ctx, buf, BUFSIZE * 4, 0,
+/// PORE_INLINE_GENERATE_PARITY);
+/// if (rc) . . . Handle Error
+///
+/// \endcode
+///
+/// Applications that reuse the same memory buffer for assembling and
+/// processing multiple PORE programs can 'reset' the context between uses by
+/// using the pore_inline_context_reset() API. pore_inline_context_reset()
+/// resets the location counter and memory extent to their initial (creation)
+/// values, and the context error code is cleared. Any options specified at
+/// creation remain as they were.
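+///
+/// For example, a minimal sketch of buffer reuse:
+///
+/// \code
+///
+/// . . . // Assemble and process a first program into 'ctx'
+///
+/// pore_inline_context_reset(&ctx); // Reuse the buffer from the top
+///
+/// . . . // Assemble the next program
+///
+/// \endcode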
+///
+/// \section Assembler
+///
+/// The inline assembler implements each PORE/PGAS instruction as individual
+/// function calls. The APIs are consistently named \c pore_\<OPCODE\>, where
+/// \c \<OPCODE\> is a PGAS mnemonic in upper case. The arguments to each
+/// opcode appear in the same order that they appear in the source-level
+/// assembler, with appropriate C-language types. The supported opcode APIs
+/// are defined in pore_inline.h.
+///
+/// Since the PORE instruction APIs are effectively predicates, linear code
+/// sequences are easily assembled using the C-language logical OR construct.
+/// Any non-0 return code will immediately break the sequence and set the
+/// expression value to 1. The failure code can then be recovered from the \a
+/// error field of the context. This coding technique is illustrated in the
+/// following example of assembling a memory-memory copy sequence.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// int error;
+///
+/// . . . // Initialize context
+///
+/// error =
+/// pore_LD(&ctx, D0, 0, A0) ||
+/// pore_STD(&ctx, D0, 0, A1);
+///
+/// if (error) <. . . Handle error based on ctx.error>
+///
+/// \endcode
+///
+/// The above example generates code equivalent to
+///
+/// \code
+///
+/// ld D0, 0, A0
+/// std D0, 0, A1
+///
+/// \endcode
+///
+/// Again, if an error were to occur during assembly, inline assembly would
+/// stop (and the logical OR would terminate) at the point of failure. In
+/// particular, the inline assembler will never allow assembled code to exceed
+/// the bounds of the memory area defined by the initial call of
+/// pore_inline_context_create() that defines the assembler memory space.
+///
+///
+/// \subsection Register Names and Other Mnemonics
+///
+/// The header file pore_inline.h defines macros for the register mnemonics.
+///
+/// - D0, D1 : 64-bit data registers
+/// - A0, A1 : 32-bit address registers
+/// - P0, P1 : 7-bit Pervasive chiplet id registers
+/// - CTR : 24-bit counter register
+/// - PC : 48-bit Program Counter
+/// - ETR : 64-bit EXE-Trigger Register (Low-order 32 bits are writable)
+/// - EMR : The Error Mask Register
+/// - IFR : ID/Flags Register
+/// - SPRG0 : 32-bit Special-Purpose General Register 0
+///
+/// Mnemonics for the condition code bits are also defined by pore_inline.h
+/// using the PGAS mnemonics.
+///
+///
+/// \subsection Assembling Branches
+///
+/// Opcodes that implement relative branches require that the branch target be
+/// specified as a <em> location counter </em>. Once initialized, the current
+/// location counter is available as the \a lc field of the PoreInlineContext
+/// object controlling the assembly. The \a lc field is the only field
+/// (besides the error code held in the \a error field) that application code
+/// should ever reference. The inline assembler also provides a typedef
+/// PoreInlineLocation to use for location counters, as well as the macro
+/// PORE_LOCATION() to define a location variable inline with the code flow.
+///
+/// \subsubsection Backward Branches
+///
+/// Backward branches are straightforward. For example, the memory-memory
+/// copy example from earlier can be converted into a loop as shown below. The
+/// \a loop_target variable is initialized with the location counter of the
+/// first instruction of the loop. The final instruction of the loop then
+/// branches back to the \a loop_target.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// PoreInlineLocation loop_target = 0; // See ** below the example
+/// int error;
+///
+/// . . . // Initialize context
+///
+/// error =
+/// PORE_LOCATION(&ctx, loop_target) ||
+/// pore_LD(&ctx, D0, 0, A0) ||
+/// pore_STD(&ctx, D0, 0, A1) ||
+/// pore_ADDS(&ctx, A0, A0, 8) ||
+/// pore_ADDS(&ctx, A1, A1, 8) ||
+/// pore_LOOP(&ctx, loop_target);
+///
+/// if (error) <. . . Handle error based on ctx.error>
+///
+/// \endcode
+///
+/// The above inline assembler sequence is equivalent to the PGAS code
+/// sequence:
+///
+/// \code
+///
+/// loop_target:
+/// ld D0, 0, A0
+/// std D0, 0, A1
+/// adds A0, A0, 8
+/// adds A1, A1, 8
+/// loop loop_target
+///
+/// \endcode
+///
+/// ** Location counters used as loop targets may need to be initialized,
+/// otherwise the compiler may issue a warning that the variable "may be used
+/// uninitialized", although in well-written code this would never happen.
+///
+///
+/// \subsubsection Forward Branches
+///
+/// Forward branches are more complex. Since the target location counter is
+/// not known until the target has been assembled, the inline assembler
+/// provides the API pore_inline_branch_fixup() to fix up forward branches
+/// once the actual target is known. This is illustrated in the simple code
+/// sequence below, where an instruction is conditionally skipped.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// PoreInlineLocation source = 0, target = 0;
+/// int error, rc;
+///
+/// . . . // Initialize context
+///
+/// error =
+/// PORE_LOCATION(&ctx, source) ||
+/// pore_BRANZ(&ctx, D0, source) ||
+/// pore_ADDS(&ctx, D1, D1, 1) ||
+/// PORE_LOCATION(&ctx, target) ||
+/// pore_LD(&ctx, D0, 0, A0);
+///
+/// if (error) <. . . Handle assembly error based on ctx->error>
+/// rc = pore_inline_branch_fixup(&ctx, source, target);
+/// if (rc) <. . . Handle branch fixup error>
+///
+/// \endcode
+///
+/// In the above code, the branch instruction is initially assembled as a
+/// branch-to-self - the recommended idiom for forward branch source
+/// instructions. Once the entire sequence has been assembled,
+/// pore_inline_branch_fixup() reassembles the \c source instruction as a
+/// branch to the \c target instruction. The above instruction sequence is
+/// equivalent to the PGAS code below:
+///
+/// \code
+///
+/// source:
+/// branz D0, target
+/// adds D1, D1, 1
+/// target:
+/// ld D0, 0, A0
+///
+/// \endcode
+///
+///
+/// \subsubsection Absolute Branches
+///
+/// It is unlikely that a typical application of the PORE inline assembler
+/// would ever need to include an absolute branch, since the branch target in
+/// this case is a fixed absolute address that must be known at assembly
+/// time. However, the inline assembler does provide the pore_BRAIA() API for
+/// this purpose. This opcode requires a 16-bit address space constant and a
+/// 32-bit absolute address (offset) within the memory space to specify the
+/// branch.
+///
+///
+/// \section Disassembly
+///
+/// Inline disassembly is implemented by a single API,
+/// pore_inline_disassemble(). The idea is similar to assembly: A host memory
+/// context containing PORE code (or data) is described by a PoreInlineContext
+/// structure. Each call of pore_inline_disassemble() disassembles the next
+/// instruction (or datum) in the context into a PoreInlineDisassembly
+/// structure provided by the caller. The disassembly object contains both
+/// binary and string forms of the disassembled instruction (or data). The
+/// next call of pore_inline_disassemble() processes the next instruction (or
+/// datum) and so on.
+///
+/// \subsection Text (Code) Disassembly
+///
+/// In the example below the inline disassembler is used to completely
+/// disassemble a memory area containing text (code) to \a stdout until an
+/// error occurs, assumed to be either due to disassembling the entire memory
+/// area or finding an illegal instruction.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// PoreInlineDisassembly dis;
+///
+/// . . . // Initialize context
+///
+/// while (pore_inline_disassemble(&ctx, &dis) == 0) {
+/// printf("%s\n", dis.s);
+/// }
+///
+/// \endcode
+///
+/// To illustrate binary disassembly, the following example uses the
+/// disassembler to search for a RET statement in a block of PORE code, in
+/// order to extend an inline subroutine with more code. Note that the field
+/// \a dis->ctx contains the context that existed at the time the instruction
+/// was assembled. By copying this context back into the global context,
+/// inline assembly will continue by overwriting the RET with new
+/// instructions. If the copy had \e not been done, then newly assembled code
+/// would have \e followed the RET.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// PoreInlineDisassembly dis;
+///
+/// . . . // Initialize context
+///
+/// while ((pore_inline_disassemble(&ctx, &dis) == 0) &&
+/// (dis.opcode != PORE_OPCODE_RET));
+/// if (ctx.error != 0) {
+/// . . . // Handle error
+/// } else {
+/// pore_inline_context_copy(&ctx, &dis.ctx);
+/// . . . // Continue assembly by overwriting the RET
+/// }
+///
+/// \endcode
+///
+/// A special type of context reset is available to simplify applications that
+/// need to disassemble a just-assembled code sequence, e.g. for debugging.
+/// pore_inline_context_reset_excursion() resets the context such that the
+/// effective size of the context only covers the just-assembled code,
+/// allowing a disassembly loop to cleanly stop once all code has been
+/// disassembled. The use is illustrated below - note that the disassembly
+/// stops on the expected error code PORE_INLINE_NO_MEMORY once the
+/// (effective) end of the buffer is reached.
+///
+/// \code
+///
+/// PoreInlineContext ctx;
+/// PoreInlineDisassembly dis;
+///
+/// . . . // Initialize context
+/// . . . // Assemble code into context
+///
+/// pore_inline_context_reset_excursion(&ctx);
+///
+/// while (pore_inline_disassemble(&ctx, &dis) == 0) {
+/// printf("%s\n", dis.s);
+/// }
+/// if (ctx.error != PORE_INLINE_NO_MEMORY) {
+/// . . . // Handle error
+/// }
+///
+/// \endcode
+///
+/// \subsection Data Disassembly
+///
+/// If the PoreInlineContext is created with the flag
+/// PORE_INLINE_DISASSEMBLE_DATA, then the context is disassembled as data. If
+/// the PoreInlineContext is created with the flag
+/// PORE_INLINE_DISASSEMBLE_UNKNOWN then putative data embedded in a text
+/// section will be disassembled as data. For complete information see the
+/// documentation for pore_inline_disassemble().
+
+
+#define __PORE_INLINE_ASSEMBLER_C__
+#include "pore_inline.h"
+#undef __PORE_INLINE_ASSEMBLER_C__
+
+// Definitions of PORE register classes. These are predicates that return
+// 1 if the register is a member of the class, else 0.
+
+PORE_STATIC int
+pore_data(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1);
+}
+
+
+PORE_STATIC int
+pore_address(int reg)
+{
+ return
+ (reg == A0) ||
+ (reg == A1);
+}
+
+
+PORE_STATIC int
+pore_pervasive_chiplet_id(int reg)
+{
+ return
+ (reg == P0) ||
+ (reg == P1);
+}
+
+
+PORE_STATIC int
+pore_branch_compare_data(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1) ||
+ (reg == CTR);
+}
+
+
+PORE_STATIC int
+pore_ls_destination(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1) ||
+ (reg == A0) ||
+ (reg == A1) ||
+ (reg == P0) ||
+ (reg == P1) ||
+ (reg == CTR);
+}
+
+
+PORE_STATIC int
+pore_li_destination(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1) ||
+ (reg == A0) ||
+ (reg == A1) ||
+ (reg == P0) ||
+ (reg == P1) ||
+ (reg == CTR);
+}
+
+
+PORE_STATIC int
+pore_mr_source(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1) ||
+ (reg == A0) ||
+ (reg == A1) ||
+ (reg == P0) ||
+ (reg == P1) ||
+ (reg == CTR) ||
+ (reg == PC) ||
+ (reg == ETR) ||
+ (reg == SPRG0) ||
+ (reg == IFR) ||
+ (reg == EMR);
+}
+
+PORE_STATIC int
+pore_mr_destination(int reg)
+{
+ return
+ (reg == D0) ||
+ (reg == D1) ||
+ (reg == A0) ||
+ (reg == A1) ||
+ (reg == P0) ||
+ (reg == P1) ||
+ (reg == CTR) ||
+ (reg == PC) ||
+ (reg == SPRG0)||
+ (reg == EMR);
+}
+
+
+/// Portable store of a 32-bit integer in big-endian format
+///
+/// The address \a p to receive the data is in the form of an unsigned long.
+
+void
+pore_inline_be32(unsigned long p, uint32_t x)
+{
+ uint8_t *p8 = (uint8_t *)p;
+ uint8_t *px = (uint8_t *)(&x);
+ int i, j;
+
+ if (!PORE_BIG_ENDIAN) {
+ for (i = 0, j = 3; i < 4; i++, j--) {
+ p8[i] = px[j];
+ }
+ } else {
+ *((uint32_t *)p) = x;
+ }
+}
+
+
+/// Portable store of a 64-bit integer in big-endian format
+///
+/// The address \a p to receive the data is in the form of an unsigned long.
+
+void
+pore_inline_be64(unsigned long p, uint64_t x)
+{
+ uint8_t *p8 = (uint8_t *)p;
+ uint8_t *px = (uint8_t *)(&x);
+ int i, j;
+
+ if (!PORE_BIG_ENDIAN) {
+ for (i = 0, j = 7; i < 8; i++, j--) {
+ p8[i] = px[j];
+ }
+ } else {
+ *((uint64_t *)p) = x;
+ }
+}
+
+
+// Portable load of a 32-bit integer in big-endian format
+
+uint32_t
+pore_inline_host32(unsigned long p)
+{
+ uint32_t x;
+ uint8_t *p8 = (uint8_t *)p;
+ uint8_t *px = (uint8_t *)(&x);
+ int i, j;
+
+ if (!PORE_BIG_ENDIAN) {
+ for (i = 0, j = 3; i < 4; i++, j--) {
+ px[j] = p8[i];
+ }
+ } else {
+ x = *((uint32_t *)p);
+ }
+
+ return x;
+}
+
+
+// Portable load of a 64-bit integer in big-endian format
+
+uint64_t
+pore_inline_host64(unsigned long p)
+{
+ uint64_t x;
+ uint8_t *p8 = (uint8_t *)p;
+ uint8_t *px = (uint8_t *)(&x);
+ int i, j;
+
+ if (!PORE_BIG_ENDIAN) {
+ for (i = 0, j = 7; i < 8; i++, j--) {
+ px[j] = p8[i];
+ }
+ } else {
+ x = *((uint64_t *)p);
+ }
+
+ return x;
+}
+
+
+// 32-bit population count
+//
+// This is a well-known divide-and-conquer algorithm. The idea is to compute
+// sums of adjacent bit segments in parallel, in place.
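+//
+// For example, x = 0xff: the pair sums give 0xaa (each 2-bit field holds 2),
+// the 4-bit sums give 0x44 and the 8-bit sums give 0x08, i.e. 8 set bits.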
+
+PORE_STATIC int
+pore_popcount32(uint32_t x)
+{
+ uint32_t m1 = 0x55555555;
+ uint32_t m2 = 0x33333333;
+ uint32_t m4 = 0x0f0f0f0f;
+ x -= (x >> 1) & m1; /* Sum pairs of bits */
+ x = (x & m2) + ((x >> 2) & m2);/* Sum 4-bit segments */
+ x = (x + (x >> 4)) & m4; /* Sum 8-bit segments */
+ x += x >> 8; /* Sum 16-bit segments */
+ return (x + (x >> 16)) & 0x3f; /* Final sum */
+}
+
+
+// 64-bit population count
+
+PORE_STATIC int
+pore_popcount64(uint64_t x)
+{
+ return pore_popcount32(x & 0xffffffff) + pore_popcount32(x >> 32);
+}
+
+
+// Compute the parity of a PORE instruction as 0 or 1
+
+int
+pore_inline_parity(uint32_t instruction, uint64_t imd64)
+{
+ return (pore_popcount32(instruction) + pore_popcount64(imd64)) % 2;
+}
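+
+// Note that the assembler (see pore_inline_instruction1() and
+// pore_inline_instruction3()) stores (1 - parity) in the instruction's
+// parity bit, so a correctly encoded instruction always has odd overall
+// parity.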
+
+
+/// Reset a PORE inline assembler context to its creation state
+///
+/// \param ctx A pointer to an initialized (and likely 'used')
+/// PoreInlineContext object.
+///
+/// This API resets a PoreInlineContext object to its \e creation state, that
+/// is, the state it was in after the call of pore_inline_context_create().
+/// This API is designed for applications that reuse a memory buffer to
+/// assemble multiple PORE code sequences. After each sequence has been fully
+/// assembled and processed, calling pore_inline_context_reset() sets the
+/// context back as it was when the context was initially created so that the
+/// memory area can be reused. In particular, this API resets the location
+/// counter and memory extent to their initial values, and the error code is
+/// cleared. Any options specified at creation remain as they were.
+///
+/// For a slightly different type of reset, see
+/// pore_inline_context_reset_excursion().
+
+void
+pore_inline_context_reset(PoreInlineContext *ctx)
+{
+ ctx->lc_address = ctx->memory;
+ ctx->remaining = ctx->size;
+ ctx->lc = ctx->original_lc;
+ ctx->error = 0;
+}
+
+
+
+/// Reset a PORE inline assembler context to a special state for disassembly
+///
+/// \param ctx A pointer to an initialized (and almost certainly 'used')
+/// PoreInlineContext object.
+///
+/// This API resets a PoreInlineContext object to its \e creation state, that
+/// is, the state it was in after the call of pore_inline_context_create(), \e
+/// except that the effective size of the memory area has been reduced to the
+/// size that was actually used during assembly. This API is designed for
+/// applications that assemble into a memory buffer and then want to easily
+/// disassemble the code (e.g., for debugging). After a code sequence has
+/// been assembled, calling pore_inline_context_reset_excursion() sets the
+/// context back as it was when the context was initially created, but with a
+/// (typically) shorter effective length, so that the disassembly will cleanly
+/// stop once the entire sequence has been disassembled. Once disassembled,
+/// the buffer can be fully reused after a subsequent call of
+/// pore_inline_context_reset(). In particular, this API resets the location
+/// counter to its initial value, clears the error code, and sets the
+/// effective size of the context to the amount of memory currently used. Any
+/// options specified at creation remain as they were.
+///
+/// For a full context reset see pore_inline_context_reset(). For an example
+/// see the \b Disassembly section of \ref pore_inline_assembler.
+
+void
+pore_inline_context_reset_excursion(PoreInlineContext *ctx)
+{
+ ctx->lc_address = ctx->memory;
+ ctx->remaining = ctx->size - ctx->remaining;
+ ctx->lc = ctx->original_lc;
+ ctx->error = 0;
+}
+
+
+/// Create a PORE inline assembler context
+///
+/// \param ctx A pointer to a PoreInlineContext object to be initialized
+/// and used for inline assembly or disassembly.
+///
+/// \param memory A pointer to the host memory area to receive the assembled
+/// code, or contain the code to disassemble. In general the inline assembler
+/// will expect this memory area to be 4-byte aligned. This pointer may be
+/// NULL (0) only if the associated \a size is also 0.
+///
+/// \param size The size (in bytes) of the host memory area. The inline
+/// assembler will generate the PORE_INLINE_NO_MEMORY error if an attempt is
+/// made to assemble an instruction that would overflow the buffer, or
+/// disassemble past the end of the buffer. A 0 size is valid.
+///
+/// \param lc The initial, bytewise, target location counter for the assembled
+/// or disassembled code. This parameter will normally be initialized to 0 for
+/// assembling relocatable programs. The parameter would only need to be
+/// specified as non-0 for special cases, such as creating a context for
+/// disassembly.
+///
+/// \param options Option flags. Option flags are OR-ed together to create
+/// the final set of options. Valid options are
+///
+/// - PORE_INLINE_GENERATE_PARITY : Generate the proper parity bit for each
+/// instruction during assembly.
+///
+/// - PORE_INLINE_CHECK_PARITY : Check for correct instruction parity during
+/// disassembly.
+///
+/// - PORE_INLINE_LISTING_MODE : Generate disassembly strings in the form of a
+/// listing that contains location counters and encoded instructions as well
+/// as their disassembly. By default the disassembly strings do not contain
+/// this information and can be fed back in as source code to a PORE
+/// assembler.
+///
+/// - PORE_INLINE_DISASSEMBLE_DATA : generate disassembly assuming that the
+/// context contains data rather than text. Normally data is disassembled as
+/// .long directives; however, if the context is unaligned or of an odd length
+/// then .byte directives may be used as well. This option can be used in
+/// conjunction with PORE_INLINE_LISTING_MODE.
+///
+/// - PORE_INLINE_8_BYTE_DATA : generate data disassembly using 8-byte values
+/// rather than the default 4-byte values. Normally data is disassembled as
+/// .quad directives under this option; however, if the context is unaligned or
+/// of an odd length then .long and .byte directives may be used as well.
+/// This option can be used in conjunction with PORE_INLINE_LISTING_MODE.
+///
+/// A PoreInlineContext describes a memory area and assembler context for
+/// inline assembly and disassembly. Assembly/disassembly begins at the host
+/// memory location and virtual location counter described in the parameters.
+/// As instructions are assembled/disassembled the PoreInlineContext keeps
+/// track of where in the host memory and virtual PORE memory areas to place
+/// new instructions during assembly, or from where to fetch the next
+/// instruction to disassemble.
+///
+/// \retval 0 Success
+///
+/// \retval PORE_INLINE_INVALID_PARAMETER Either the \a context pointer is
+/// NULL (0), the \a memory pointer is NULL (0) with a non-0 size, or the \a
+/// options include invalid options. The error code is also stored as the
+/// value of ctx->error, and in the event of an error the ctx->size field is
+/// set to 0, effectively preventing the context from being used.
+
+int
+pore_inline_context_create(PoreInlineContext *ctx,
+ void *memory, size_t size,
+ PoreInlineLocation lc, int options)
+{
+ int rc;
+
+ int valid_options =
+ PORE_INLINE_GENERATE_PARITY |
+ PORE_INLINE_CHECK_PARITY |
+ PORE_INLINE_LISTING_MODE |
+ PORE_INLINE_DISASSEMBLE_DATA |
+ PORE_INLINE_8_BYTE_DATA |
+ PORE_INLINE_DISASSEMBLE_UNKNOWN;
+
+ if ((ctx == 0) || ((memory == 0) && (size != 0)) ||
+ ((options & ~valid_options) != 0)) {
+ rc = PORE_INLINE_INVALID_PARAMETER;
+ } else {
+ rc = 0;
+ ctx->memory = (unsigned long)memory;
+ ctx->size = size;
+ ctx->original_lc = lc;
+ ctx->options = options;
+ pore_inline_context_reset(ctx);
+ }
+
+ if (ctx != 0) {
+ ctx->error = rc;
+ if (rc) {
+ ctx->size = 0; /* Effectively prevents using the ctx */
+ }
+ }
+
+ return rc;
+}
+
+
+/// Copy a PORE inline assembler context
+///
+/// \param dest A pointer to a PoreInlineContext object to be initialized
+/// as a copy of the \a src context.
+///
+/// \param src A pointer to a PoreInlineContext object to be used as the
+/// source of the copy.
+///
+/// This API copies one PoreInlineContext structure to another. An example
+/// use appears in \ref pore_inline_assembler in the section discussing
+/// disassembly.
+
+void
+pore_inline_context_copy(PoreInlineContext *dest, PoreInlineContext *src)
+{
+ *dest = *src;
+}
+
+
+// 'Bump' a context forward by a given number of bytes. This is an internal API
+// and the bump is always known to be legal.
+
+void
+pore_inline_context_bump(PoreInlineContext *ctx, size_t bytes)
+{
+ ctx->remaining -= bytes;
+ ctx->lc += bytes;
+ ctx->lc_address += bytes;
+}
+
+
+// Allocate space in the inline assembler context
+//
+// Allocation is specified and implemented in bytes. Both the physical
+// memory and the virtual LC are required to be 4-byte aligned. The allocator
+// returns a pointer to the memory area, or 0 if allocation fails.
+// Allocation failure sets the context error code to either
+// PORE_INLINE_NO_MEMORY or PORE_INLINE_ALIGNMENT_ERROR.
+
+PORE_STATIC unsigned long
+pore_inline_allocate(PoreInlineContext *ctx, size_t bytes)
+{
+ unsigned long p = 0;
+
+ if (((ctx->lc % 4) != 0) ||
+ ((ctx->lc_address % 4) != 0)) {
+ ctx->error = PORE_INLINE_ALIGNMENT_ERROR;
+
+ } else if (bytes > ctx->remaining) {
+ ctx->error = PORE_INLINE_NO_MEMORY;
+
+ } else {
+ p = ctx->lc_address;
+ pore_inline_context_bump(ctx, bytes);
+ }
+ return p;
+}
+
+
+// Assemble a 1-word instruction
+//
+// The opcode and operand are assumed to be legal, having come from
+// abstractions that check their arguments. This call may fail with
+// PORE_INLINE_NO_MEMORY if there is no more room in the memory buffer. A
+// non-zero return indicates failure.
+
+int
+pore_inline_instruction1(PoreInlineContext *ctx, int opcode, uint32_t operand)
+{
+ uint32_t instruction;
+ unsigned long p;
+
+ p = pore_inline_allocate(ctx, 4);
+ if (p != 0) {
+
+ instruction = (opcode << 25) | operand;
+ if (ctx->options & PORE_INLINE_GENERATE_PARITY) {
+ instruction |= (1 - pore_inline_parity(instruction, 0)) << 24;
+ }
+
+ pore_inline_be32(p, instruction);
+ ctx->error = 0;
+ }
+ return p == 0;
+}
+
+
+// Assemble a 3-word instruction
+//
+// The opcode and operand are assumed to be legal, having come from
+// abstractions that check their arguments. This call may fail with
+// PORE_INLINE_NO_MEMORY if there is no more room in the memory buffer. A
+// non-zero return indicates failure.
+
+int
+pore_inline_instruction3(PoreInlineContext *ctx, int opcode, uint32_t operand,
+ uint64_t immediate)
+{
+ uint32_t instruction;
+ unsigned long p;
+
+ p = pore_inline_allocate(ctx, 12);
+ if (p != 0) {
+
+ instruction = (opcode << 25) | operand;
+ if (ctx->options & PORE_INLINE_GENERATE_PARITY) {
+ instruction |= (1 - pore_inline_parity(instruction, immediate)) << 24;
+ }
+
+ pore_inline_be32(p, instruction);
+ pore_inline_be64(p + 4, immediate);
+ ctx->error = 0;
+ }
+ return p == 0;
+}
+
+
+// Assemble WAIT
+//
+// The cycle count must be an unsigned 24-bit immediate, otherwise the error
+// PORE_INLINE_UINT24_REQUIRED is signalled. PGAS requires that HALT be used
+// if the intention is to halt.
+
+int
+pore_WAITS(PoreInlineContext *ctx, uint32_t cycles)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_WAITS;
+
+ if (cycles == 0) {
+ ctx->error = PORE_INLINE_USE_HALT;
+ } else if ((cycles & 0xffffff) != cycles) {
+ ctx->error = PORE_INLINE_UINT24_REQUIRED;
+ } else {
+ operand = cycles;
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble HOOKI
+//
+// The hook index must be an unsigned 24-bit immediate, otherwise the error
+// PORE_INLINE_UINT24_REQUIRED is signalled.
+
+int
+pore_HOOKI(PoreInlineContext *ctx, uint32_t index, uint64_t imm)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_HOOKI;
+
+ if ((index & 0xffffff) != index) {
+ ctx->error = PORE_INLINE_UINT24_REQUIRED;
+ } else {
+ operand = index;
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ }
+ return ctx->error;
+}
+
+
+// Assemble BRA, BSR and LOOP
+//
+// The branch target here is a bytewise location counter. The target must be
+// 4-byte aligned and must be within the legal signed 24-bit word offset of
+// the current LC. Unaligned targets cause PORE_INLINE_ALIGNMENT_ERROR.
+// Unreachable targets cause PORE_INLINE_UNREACHABLE_TARGET.
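+//
+// Note that the offset is computed relative to the LC of the branch
+// instruction itself, so passing the current LC as the target assembles the
+// branch-to-self idiom (offset 0) recommended for forward-branch sources.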
+
+int
+pore_inline_bra(PoreInlineContext *ctx, int opcode, PoreInlineLocation target)
+{
+ int32_t offset;
+ uint32_t operand;
+
+ if (target % 4) {
+ ctx->error = PORE_INLINE_ALIGNMENT_ERROR;
+ } else {
+ offset = (int32_t)(target - ctx->lc) / 4;
+ if ((offset >= (1 << 23)) ||
+ (offset < -(1 << 23))) {
+ ctx->error = PORE_INLINE_UNREACHABLE_TARGET;
+ } else {
+ operand = offset & 0xffffff;
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ }
+ return ctx->error;
+}
+
+
+// Assemble BRAZ and BRANZ
+//
+// The branch target here is a bytewise location counter. The target must be
+// 4-byte aligned and must be within the legal signed 20-bit word offset of
+// the current LC. Unaligned targets cause PORE_INLINE_ALIGNMENT_ERROR.
+// Unreachable targets cause PORE_INLINE_UNREACHABLE_TARGET. Illegal
+// operands cause PORE_INLINE_ILLEGAL_REGISTER.
+
+int
+pore_inline_brac(PoreInlineContext *ctx, int opcode, int reg,
+ PoreInlineLocation target)
+{
+ int32_t offset;
+ uint32_t operand;
+
+ if (target % 4) {
+ ctx->error = PORE_INLINE_ALIGNMENT_ERROR;
+ } else if (!pore_branch_compare_data(reg)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ offset = (int32_t)(target - ctx->lc) / 4;
+ if ((offset >= (1 << 19)) ||
+ (offset < -(1 << 19))) {
+ ctx->error = PORE_INLINE_UNREACHABLE_TARGET;
+ } else {
+ operand = (offset & 0xfffff) | (reg << 20);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ }
+ return ctx->error;
+}
+
+
+// Assemble CMPIBRAEQ, CMPIBRANE, CMPIBSREQ
+//
+// The branch target here is a bytewise location counter. The target must be
+// 4-byte aligned and must be within the legal signed 24-bit word offset of
+// the current LC. Unaligned targets cause PORE_INLINE_ALIGNMENT_ERROR.
+// Unreachable targets cause PORE_INLINE_UNREACHABLE_TARGET. Illegal
+// operands cause PORE_INLINE_ILLEGAL_REGISTER.
+
+int
+pore_inline_cmpibra(PoreInlineContext *ctx, int opcode, int reg,
+ PoreInlineLocation target, uint64_t imm)
+{
+ int32_t offset;
+ uint32_t operand;
+
+ if (target % 4) {
+ ctx->error = PORE_INLINE_ALIGNMENT_ERROR;
+ } else if (reg != D0) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ offset = (int32_t)(target - ctx->lc) / 4;
+ if ((offset >= (1 << 23)) ||
+ (offset < -(1 << 23))) {
+ ctx->error = PORE_INLINE_UNREACHABLE_TARGET;
+ } else {
+ operand = offset & 0xffffff;
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ }
+ }
+ return ctx->error;
+}
+
+
+// Assemble BRAD and BSRD
+//
+// Illegal operands cause PORE_INLINE_ILLEGAL_REGISTER.
+
+int
+pore_inline_brad(PoreInlineContext *ctx, int opcode, int reg)
+{
+ uint32_t operand;
+
+ if (!pore_data(reg)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = reg << 20;
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble ANDI, ORI, XORI
+//
+// Source and destination must be of class 'data', otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated.
+
+int
+pore_inline_ilogic(PoreInlineContext *ctx, int opcode,
+ int dest, int src, uint64_t imm)
+{
+ uint32_t operand;
+
+ if (!pore_data(dest) || !pore_data(src)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = (dest << 20) | (src << 16);
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ }
+ return ctx->error;
+}
+
+
+// Assemble AND, OR, XOR, ADD, SUB
+//
+// Destination must be of class 'data', otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated. src1 and src2 must be D0
+// and D1 respectively, otherwise the PORE_INLINE_ILLEGAL_REGISTER error is
+// generated.
+
+int
+pore_inline_alurr(PoreInlineContext *ctx,
+ int opcode, int dest, int src1, int src2)
+{
+ uint32_t operand;
+
+ if (!pore_data(dest) || (src1 != D0) || (src2 != D1)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = (dest << 20);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble ADDS and SUBS
+//
+// Destination must be of class 'ls_destination' and must be equal to source,
+// otherwise the PORE_INLINE_ILLEGAL_REGISTER error is generated. If the
+// immediate is not a signed 16-bit immediate then the
+// PORE_INLINE_INT16_REQUIRED error is generated.
+
+int
+pore_inline_adds(PoreInlineContext *ctx,
+ int opcode, int dest, int src, int imm)
+{
+ uint32_t operand;
+
+ if (!pore_ls_destination(dest) || (dest != src)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ if ((imm >= (1 << 15)) ||
+ (imm < -(1 << 15))) {
+ ctx->error = PORE_INLINE_INT16_REQUIRED;
+ } else {
+ operand = (dest << 20) | (imm & 0xffff);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ }
+ return ctx->error;
+}
+
+
+// Assemble NEG
+//
+// Source and destination must be of class 'data', otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated.
+
+int
+pore_NEG(PoreInlineContext *ctx, int dest, int src)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_NEG;
+
+ if (!pore_data(dest) || !pore_data(src)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = (dest << 20) | (src << 16);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble MR
+//
+// The source must be an 'mr_source' and the destination must be an
+// 'mr_destination', otherwise the PORE_INLINE_ILLEGAL_REGISTER error is
+// generated.
+
+int
+pore_MR(PoreInlineContext *ctx, int dest, int src)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_MR;
+
+ if (!pore_mr_destination(dest) || !pore_mr_source(src)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = (dest << 20) | (src << 16);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+
+// Assemble ROLS
+//
+// Source and destination must be of class 'data', otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated. Illegal shifts yield the
+// PORE_INLINE_ILLEGAL_ROTATE error.
+
+int
+pore_ROLS(PoreInlineContext *ctx, int dest, int src, int imm)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_ROLS;
+
+ if (!pore_data(dest) || !pore_data(src)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else if ((imm != 1) &&
+ (imm != 4) &&
+ (imm != 8) &&
+ (imm != 16) &&
+ (imm != 32)) {
+ ctx->error = PORE_INLINE_ILLEGAL_ROTATE;
+ } else {
+ operand = (dest << 20) | (src << 16) | imm;
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble LS
+//
+// The destination must be an 'ls_destination', otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated. If the immediate is not
+// a signed 20-bit immediate then the PORE_INLINE_INT20_REQUIRED error is
+// generated.
+
+int
+pore_LS(PoreInlineContext *ctx, int dest, int imm)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_LS;
+
+ if (!pore_ls_destination(dest)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else if ((imm >= (1 << 19)) ||
+ (imm < -(1 << 19))) {
+ ctx->error = PORE_INLINE_INT20_REQUIRED;
+ } else {
+ operand = (dest << 20) | (imm & 0xfffff);
+ pore_inline_instruction1(ctx, opcode, operand);
+ }
+ return ctx->error;
+}
+
+
+// Assemble LI
+//
+// The destination must be an 'li destination' otherwise the
+// PORE_INLINE_ILLEGAL_REGISTER error is generated.
+
+int
+pore_LI(PoreInlineContext *ctx, int dest, uint64_t imm)
+{
+ uint32_t operand;
+ int opcode = PGAS_OPCODE_LI;
+
+ if (!pore_li_destination(dest)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ operand = dest << 20;
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ }
+ return ctx->error;
+}
+
+
+// BSI and BCI are normally redacted as instructions due to HW274735
+
+// LD, LDANDI, STD, STI, BSI, BCI
+
+PORE_STATIC void
+pervasive_ima24(PoreInlineContext *ctx,
+ int opcode, uint32_t offset, int base, uint64_t imm)
+{
+ uint32_t operand;
+
+ if ((offset & 0x80f00000) != 0) {
+ ctx->error = PORE_INLINE_ILLEGAL_SCOM_ADDRESS;
+ } else {
+ operand = ((base % 2) << 22) | (offset & 0xfffff);
+ switch (opcode) {
+ case PGAS_OPCODE_LD0:
+ case PGAS_OPCODE_LD1:
+ case PGAS_OPCODE_STD0:
+ case PGAS_OPCODE_STD1:
+ pore_inline_instruction1(ctx, opcode, operand);
+ break;
+ default:
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ break;
+ }
+ }
+}
+
+
+PORE_STATIC void
+memory_ima24(PoreInlineContext *ctx,
+ int opcode, uint32_t offset, int base, uint64_t imm)
+{
+ uint32_t operand;
+
+ if ((offset & 0x3fffff) != offset) {
+ ctx->error = PORE_INLINE_UINT22_REQUIRED;
+ } else if ((offset % 8) != 0) {
+ ctx->error = PORE_INLINE_ALIGNMENT_ERROR;
+ } else {
+ operand = 0x800000 | ((base % 2) << 22) | (offset & 0x3fffff);
+ switch (opcode) {
+ case PGAS_OPCODE_LD0:
+ case PGAS_OPCODE_LD1:
+ case PGAS_OPCODE_STD0:
+ case PGAS_OPCODE_STD1:
+ pore_inline_instruction1(ctx, opcode, operand);
+ break;
+ default:
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ break;
+ }
+ }
+}
+
+
+PORE_STATIC void
+ima24(PoreInlineContext *ctx,
+ int opcode, uint32_t offset, int base, uint64_t imm)
+{
+ if (pore_pervasive_chiplet_id(base)) {
+ pervasive_ima24(ctx, opcode, offset, base, imm);
+ } else if (pore_address(base)) {
+ memory_ima24(ctx, opcode, offset, base, imm);
+ } else {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ }
+}
+
+
+int
+pore_inline_load_store(PoreInlineContext *ctx,
+ int opcode, int src_dest, int32_t offset, int base,
+ uint64_t imm)
+{
+ switch (opcode) {
+
+ case PORE_INLINE_PSEUDO_LD:
+ case PORE_INLINE_PSEUDO_LDANDI:
+ case PORE_INLINE_PSEUDO_STD:
+
+ // These three pick the real opcode based on the dest. register
+
+ if (!pore_data(src_dest)) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ } else {
+ switch (opcode) {
+ case PORE_INLINE_PSEUDO_LD:
+ opcode = (src_dest == D0) ?
+ PGAS_OPCODE_LD0 : PGAS_OPCODE_LD1;
+ break;
+ case PORE_INLINE_PSEUDO_LDANDI:
+ opcode = (src_dest == D0) ?
+ PGAS_OPCODE_LD0ANDI : PGAS_OPCODE_LD1ANDI;
+ break;
+ case PORE_INLINE_PSEUDO_STD:
+ opcode = (src_dest == D0) ?
+ PGAS_OPCODE_STD0 : PGAS_OPCODE_STD1;
+ break;
+ }
+ }
+ break;
+
+#ifdef IGNORE_HW274735
+
+ // BSI and BCI are normally redacted as instructions due to HW274735
+
+ case PGAS_OPCODE_BSI:
+ case PGAS_OPCODE_BCI:
+
+ if (src_dest != D0) {
+ ctx->error = PORE_INLINE_ILLEGAL_REGISTER;
+ }
+ break;
+
+#endif // IGNORE_HW274735
+
+ case PGAS_OPCODE_STI:
+ break;
+
+ default:
+ ctx->error = PORE_INLINE_BUG;
+ }
+
+ if (ctx->error == 0) {
+ ima24(ctx, opcode, offset, base, imm);
+ }
+
+ return ctx->error;
+}
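+
+// An illustrative dispatch sketch (hypothetical; 'base' stands for any
+// register accepted by pore_pervasive_chiplet_id() or pore_address(), and
+// 'ctx' is an initialized context):
+//
+//     // With src_dest == D1 the LD pseudo-opcode resolves to PGAS_OPCODE_LD1
+//     // and assembles as a 1-word instruction; STI and the LDANDI variants
+//     // assemble as 3-word instructions carrying the 64-bit immediate.
+//     pore_inline_load_store(&ctx, PORE_INLINE_PSEUDO_LD, D1, 0x48, base, 0);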
+
+
+// Assemble BRAIA
+
+int
+pore_BRAIA(PoreInlineContext *ctx,
+ uint16_t address_space, uint32_t offset)
+{
+ int opcode = PGAS_OPCODE_BRAI;
+ uint32_t operand = 0;
+ uint64_t imm = ((uint64_t)address_space << 32) | offset;
+
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+
+ return ctx->error;
+}
+
+
+// Assemble SCAND
+
+int
+pore_SCAND(PoreInlineContext *ctx,
+ int update, int capture, uint16_t length,
+ uint32_t select, uint32_t offset)
+{
+ int opcode = PGAS_OPCODE_SCAND;
+ uint32_t operand;
+ uint64_t imm = ((uint64_t)select << 32) | offset;
+
+ if ((update < 0) ||
+ (update > 1) ||
+ (capture < 0) ||
+ (capture > 1)) {
+ ctx->error = PORE_INLINE_INVALID_PARAMETER;
+ } else {
+ operand = (update << 23) | (capture << 22) | length;
+ pore_inline_instruction3(ctx, opcode, operand, imm);
+ }
+ return ctx->error;
+}
+
+
+/// Fix up a PORE inline assembler forward branch instruction
+///
+/// \param ctx A pointer to the initialized PoreInlineContext object
+/// controlling inline assembly.
+///
+/// \param source The PORE inline location counter associated with the source
+/// instruction of the forward branch.
+///
+/// \param target The PORE inline location counter associated with the target
+/// instruction of the forward branch.
+///
+/// For usage examples, see the documentation \ref pore_inline_assembler.
+/// Although intended for forward branches, this API could be used to create
+/// backward branches as well. Note however the limitation that the \a source
+/// must be in the current context, since the source instruction needs to be
+/// reassembled with the branch target. In theory the \a target could be
+/// anywhere, as long as the location counter of the target is known.
+///
+/// \retval 0 Success
+///
+/// \retval code Failure. Any non-zero return is the PORE inline assembler
+/// error code. The failure code is also stored in the PoreInlineContext
+/// object \a error field. The most likely causes of failure include a source
+/// location that is not in the current context or not associated with a
+/// branch instruction.
+
+int
+pore_inline_branch_fixup(PoreInlineContext *ctx,
+ PoreInlineLocation source,
+ PoreInlineLocation target)
+{
+ uint32_t instruction;
+ int32_t distance;
+ uint64_t imm;
+ int opcode, reg;
+ PoreInlineContext source_ctx;
+
+ if ((source < ctx->original_lc) ||
+ (source > ctx->lc)) {
+ ctx->error = PORE_INLINE_ILLEGAL_SOURCE_LC;
+ } else {
+
+ // Create a context as it existed when the source instruction was
+ // initially assembled, and then reassemble the instruction in that
+ // context with the actual target.
+
+ distance = ctx->lc - source;
+
+ source_ctx = *ctx;
+ source_ctx.lc = source;
+ source_ctx.remaining += distance;
+ source_ctx.lc_address -= distance;
+ source_ctx.error = 0;
+
+ instruction = pore_inline_host32(source_ctx.lc_address);
+ opcode = (instruction >> 25);
+ reg = (instruction >> 20) & 0xf;
+
+ switch (opcode) {
+ case PGAS_OPCODE_BRA:
+ pore_BRA(&source_ctx, target);
+ break;
+ case PGAS_OPCODE_BSR:
+ pore_BSR(&source_ctx, target);
+ break;
+ case PGAS_OPCODE_LOOP:
+ pore_LOOP(&source_ctx, target);
+ break;
+ case PGAS_OPCODE_BRAZ:
+ pore_BRAZ(&source_ctx, reg, target);
+ break;
+ case PGAS_OPCODE_BRANZ:
+ pore_BRANZ(&source_ctx, reg, target);
+ break;
+ case PGAS_OPCODE_CMPIBRAEQ:
+ imm = pore_inline_host64(source_ctx.lc_address + 4);
+ pore_CMPIBRAEQ(&source_ctx, D0, target, imm);
+ break;
+ case PGAS_OPCODE_CMPIBRANE:
+ imm = pore_inline_host64(source_ctx.lc_address + 4);
+ pore_CMPIBRANE(&source_ctx, D0, target, imm);
+ break;
+ case PGAS_OPCODE_CMPIBSREQ:
+ imm = pore_inline_host64(source_ctx.lc_address + 4);
+ pore_CMPIBSREQ(&source_ctx, D0, target, imm);
+ break;
+ default:
+ source_ctx.error = PORE_INLINE_NOT_A_BRANCH;
+ break;
+ }
+
+ ctx->error = source_ctx.error;
+ }
+ return ctx->error;
+}
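+
+
+// A minimal forward-branch sketch (illustrative only; it assumes the
+// context-creation API of the inline assembler and reads the 'lc' field the
+// same way the fixup code above does):
+//
+//     PoreInlineContext ctx;
+//     PoreInlineLocation source, target;
+//
+//     pore_inline_context_create(&ctx, buffer, sizeof(buffer), 0, 0);
+//     source = ctx.lc;                 // Location of the branch source
+//     pore_BRA(&ctx, source);          // Placeholder branch-to-self
+//     /* ... assemble the intervening code ... */
+//     target = ctx.lc;                 // The actual branch target
+//     pore_inline_branch_fixup(&ctx, source, target);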
diff --git a/libpore/sbe_xip_image.c b/libpore/sbe_xip_image.c
new file mode 100644
index 0000000..2cbe96e
--- /dev/null
+++ b/libpore/sbe_xip_image.c
@@ -0,0 +1,2562 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.c $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+// $Id: sbe_xip_image.c,v 1.28 2013/12/11 00:12:41 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/sbe/sbe_xip_image.c,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2011
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+// *! OWNER NAME: Bishop Brock Email: bcbrock@us.ibm.com
+//------------------------------------------------------------------------------
+
+/// \file sbe_xip_image.c
+/// \brief APIs for validating, normalizing, searching and manipulating
+/// SBE-XIP images.
+///
+/// The background, APIs and implementation details are documented in the
+/// document "SBE-XIP Binary format" currently available at this link:
+///
+/// - https://mcdoc.boeblingen.de.ibm.com/out/out.ViewDocument.php?documentid=2678
+///
+/// \bug The sbe_xip_validate() API should be carefully reviewed to ensure
+/// that validating even a corrupt image can not lead to a segfault, i.e., to
+/// ensure that no memory outside of the putative bounds of the image is ever
+/// referenced during validation.
+
+#ifndef PLIC_MODULE
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#endif // PLIC_MODULE
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include "sbe_xip_image.h"
+
+
+////////////////////////////////////////////////////////////////////////////
+// Local Functions
+////////////////////////////////////////////////////////////////////////////
+
+// PHYP has its own way of implementing the <string.h> functions. PHYP also
+// does not allow static functions or data, so all of the XIP_STATIC functions
+// defined here are global to PHYP.
+
+#ifdef PPC_HYP
+
+#ifdef PLIC_MODULE
+
+#define strcpy(dest, src) hvstrcpy(dest, src)
+#define strlen(s) hvstrlen(s)
+#define strcmp(s1, s2) hvstrcmp(s1, s2)
+#endif //PLIC_MODULE
+
+#define XIP_STATIC
+
+#else // PPC_HYP
+
+#define XIP_STATIC static
+
+#endif // PPC_HYP
+
+
+#ifdef DEBUG_SBE_XIP_IMAGE
+
+// Debugging support, normally disabled. All of the formatted I/O you see in
+// the code is effectively under this switch.
+
+#ifdef __FAPI
+
+#include "fapi.H"
+#define fprintf(stream, ...) FAPI_ERR(__VA_ARGS__)
+#define printf(...) FAPI_INF(__VA_ARGS__)
+#define TRACE_NEWLINE ""
+
+#else // __FAPI
+
+#include <stdio.h>
+#define TRACE_NEWLINE "\n"
+
+#endif // __FAPI
+
+// Portable formatting of uint64_t. The ISO C99 standard requires
+// __STDC_FORMAT_MACROS to be defined in order for PRIx64 etc. to be defined.
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
+#define F0x016llx "0x%016" PRIx64
+#define F0x012llx "0x%012" PRIx64
+
+XIP_STATIC SBE_XIP_ERROR_STRINGS(sbe_xip_error_strings);
+
+#define TRACE_ERROR(x) \
+ ({ \
+ fprintf(stderr, "%s:%d : Returning error code %d : %s" TRACE_NEWLINE, \
+ __FILE__, __LINE__, (x), \
+ SBE_XIP_ERROR_STRING(sbe_xip_error_strings, (x))); \
+ (x); \
+ })
+
+#define TRACE_ERRORX(x, ...) \
+ ({ \
+ TRACE_ERROR(x); \
+ fprintf(stderr, ##__VA_ARGS__); \
+ (x); \
+ })
+
+
+// Uncomment these if required for debugging, otherwise we get warnings from
+// GCC as they are not otherwise used.
+
+#if 0
+
+XIP_STATIC uint32_t xipRevLe32(const uint32_t i_x);
+
+XIP_STATIC SBE_XIP_TYPE_STRINGS(type_strings);
+
+XIP_STATIC void
+dumpToc(int index, SbeXipToc* toc)
+{
+ printf("TOC entry %d @ %p\n"
+ " iv_id = 0x%08x\n"
+ " iv_data = 0x%08x\n"
+ " iv_type = %s\n"
+ " iv_section = 0x%02x\n"
+ " iv_elements = %d\n",
+ index, toc,
+ xipRevLe32(toc->iv_id),
+ xipRevLe32(toc->iv_data),
+ SBE_XIP_TYPE_STRING(type_strings, toc->iv_type),
+ toc->iv_section,
+ toc->iv_elements);
+}
+
+#endif
+
+#if 0
+
+XIP_STATIC void
+dumpItem(SbeXipItem* item)
+{
+ printf("SbeXipItem @ %p\n"
+ " iv_toc = %p\n"
+ " iv_address = " F0x016llx "\n"
+ " iv_imageData = %p\n"
+ " iv_id = %s\n"
+ " iv_type = %s\n"
+ " iv_elements = %d\n",
+ item,
+ item->iv_toc,
+ item->iv_address,
+ item->iv_imageData,
+ item->iv_id,
+ SBE_XIP_TYPE_STRING(type_strings, item->iv_type),
+ item->iv_elements);
+ dumpToc(-1, item->iv_toc);
+}
+
+#endif /* 0 */
+
+XIP_STATIC void
+dumpSectionTable(const void* i_image)
+{
+ int i, rc;
+ SbeXipSection section;
+
+ printf("Section table dump of image @ %p\n"
+ " Entry Offset Size\n"
+ "-------------------------------\n",
+ i_image);
+
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+ rc = sbe_xip_get_section(i_image, i, &section);
+ if (rc) {
+ printf(">>> dumpSectionTable got error at entry %d : %s\n",
+ i, SBE_XIP_ERROR_STRING(sbe_xip_error_strings, rc));
+ break;
+ }
+ printf("%7d 0x%08x 0x%08x\n",
+ i, section.iv_offset, section.iv_size);
+ }
+}
+
+#else
+
+#define TRACE_ERROR(x) (x)
+#define TRACE_ERRORX(x, ...) (x)
+#define dumpToc(...)
+#define dumpItem(...)
+#define dumpSectionTable(...)
+
+#endif
+
+
+// Note: For maximum flexibility we provide private versions of
+// endian-conversion routines rather than counting on a system-specific header
+// to provide these.
+
+/// Byte-reverse a 16-bit integer if on a little-endian machine
+
+XIP_STATIC uint16_t
+xipRevLe16(const uint16_t i_x)
+{
+ uint16_t rx;
+
+#ifndef _BIG_ENDIAN
+ uint8_t *pix = (uint8_t*)(&i_x);
+ uint8_t *prx = (uint8_t*)(&rx);
+
+ prx[0] = pix[1];
+ prx[1] = pix[0];
+#else
+ rx = i_x;
+#endif
+
+ return rx;
+}
+
+
+/// Byte-reverse a 32-bit integer if on a little-endian machine
+
+XIP_STATIC uint32_t
+xipRevLe32(const uint32_t i_x)
+{
+ uint32_t rx;
+
+#ifndef _BIG_ENDIAN
+ uint8_t *pix = (uint8_t*)(&i_x);
+ uint8_t *prx = (uint8_t*)(&rx);
+
+ prx[0] = pix[3];
+ prx[1] = pix[2];
+ prx[2] = pix[1];
+ prx[3] = pix[0];
+#else
+ rx = i_x;
+#endif
+
+ return rx;
+}
+
+
+/// Byte-reverse a 64-bit integer if on a little-endian machine
+
+XIP_STATIC uint64_t
+xipRevLe64(const uint64_t i_x)
+{
+ uint64_t rx;
+
+#ifndef _BIG_ENDIAN
+ uint8_t *pix = (uint8_t*)(&i_x);
+ uint8_t *prx = (uint8_t*)(&rx);
+
+ prx[0] = pix[7];
+ prx[1] = pix[6];
+ prx[2] = pix[5];
+ prx[3] = pix[4];
+ prx[4] = pix[3];
+ prx[5] = pix[2];
+ prx[6] = pix[1];
+ prx[7] = pix[0];
+#else
+ rx = i_x;
+#endif
+
+ return rx;
+}
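+
+// For illustration, a host-independent read of a big-endian image field:
+//
+//     uint32_t size = xipRevLe32(((SbeXipHeader*)image)->iv_imageSize);
+//
+// On a little-endian host the bytes are reversed; on a big-endian host the
+// value is returned unchanged.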
+
+
+/// What is the image link address?
+
+XIP_STATIC uint64_t
+xipLinkAddress(const void* i_image)
+{
+ return xipRevLe64(((SbeXipHeader*)i_image)->iv_linkAddress);
+}
+
+
+/// What is the image size?
+
+XIP_STATIC uint32_t
+xipImageSize(const void* i_image)
+{
+ return xipRevLe32(((SbeXipHeader*)i_image)->iv_imageSize);
+}
+
+
+/// Set the image size
+
+XIP_STATIC void
+xipSetImageSize(void* io_image, const size_t i_size)
+{
+ ((SbeXipHeader*)io_image)->iv_imageSize = xipRevLe32(i_size);
+}
+
+
+/// Re-establish the required final alignment
+
+XIP_STATIC void
+xipFinalAlignment(void* io_image)
+{
+ uint32_t size;
+
+ size = xipImageSize(io_image);
+
+ if ((size % SBE_XIP_FINAL_ALIGNMENT) != 0) {
+ xipSetImageSize(io_image,
+ size + (SBE_XIP_FINAL_ALIGNMENT -
+ (size % SBE_XIP_FINAL_ALIGNMENT)));
+ }
+}
+
+
+/// Compute a host address from an image address and offset
+
+XIP_STATIC void*
+xipHostAddressFromOffset(const void* i_image, const uint32_t offset)
+{
+ return (void*)((unsigned long)i_image + offset);
+}
+
+
+/// Convert a PORE address to a host address
+
+XIP_STATIC void*
+xipPore2Host(const void* i_image, const uint64_t i_poreAddress)
+{
+ return xipHostAddressFromOffset(i_image,
+ i_poreAddress - xipLinkAddress(i_image));
+}
+
+
+XIP_STATIC int
+xipValidatePoreAddress(const void* i_image,
+ const uint64_t i_poreAddress,
+ const uint32_t size)
+{
+ int rc;
+
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress > (xipLinkAddress(i_image) +
+ xipImageSize(i_image) -
+ size))) {
+ rc = TRACE_ERRORX(SBE_XIP_INVALID_ARGUMENT,
+ "The PORE address " F0x012llx
+ " is outside the bounds "
+ "of the image ("
+ F0x012llx ":" F0x012llx
+ ") for %u-byte access.\n",
+ i_poreAddress,
+ xipLinkAddress(i_image),
+ xipLinkAddress(i_image) + xipImageSize(i_image) - 1,
+ size);
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+
+/// Get the magic number from the image
+
+XIP_STATIC uint64_t
+xipMagic(const void* i_image)
+{
+ return xipRevLe64(((SbeXipHeader*)i_image)->iv_magic);
+}
+
+
+/// Get the header version from the image
+
+XIP_STATIC uint8_t
+xipHeaderVersion(const void* i_image)
+{
+ return ((SbeXipHeader*)i_image)->iv_headerVersion;
+}
+
+
+/// Has the image been normalized?
+
+XIP_STATIC uint8_t
+xipNormalized(const void* i_image)
+{
+ return ((SbeXipHeader*)i_image)->iv_normalized;
+}
+
+
+/// Has the image TOC been sorted?
+
+XIP_STATIC uint8_t
+xipSorted(const void* i_image)
+{
+ return ((SbeXipHeader*)i_image)->iv_tocSorted;
+}
+
+
+/// A quick check that the image exists, has the correct magic and header
+/// version, and optionally is normalized.
+
+XIP_STATIC int
+xipQuickCheck(const void* i_image, const int i_normalizationRequired)
+{
+ int rc;
+
+ do {
+ rc = 0;
+
+ if (i_image == 0) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Image pointer is NULL (0)\n");
+ break;
+ }
+ if ((xipMagic(i_image) >> 32) != SBE_XIP_MAGIC) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Magic number mismatch; Found "
+ "" F0x016llx ", expected 0x%08x........\n",
+ xipMagic(i_image), SBE_XIP_MAGIC);
+ break;
+ }
+ if ((xipHeaderVersion(i_image)) != SBE_XIP_HEADER_VERSION) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Header version mismatch; Expecting %d, "
+ "found %d\n",
+ SBE_XIP_HEADER_VERSION,
+ xipHeaderVersion(i_image));
+ break;
+ }
+ if (i_normalizationRequired && !xipNormalized(i_image)) {
+ rc = TRACE_ERRORX(SBE_XIP_NOT_NORMALIZED,
+ "Image not normalized\n");
+ break;
+ }
+ } while(0);
+
+ return rc;
+}
+
+
+/// Convert a 32-bit relocatable offset to a full PORE 48-bit address
+
+XIP_STATIC uint64_t
+xipFullAddress(const void* i_image, uint32_t offset)
+{
+ return (xipLinkAddress(i_image) & 0x0000ffff00000000ull) + offset;
+}
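+
+// For example (illustrative values): with a link address of
+// 0x0000000800000000, xipFullAddress(i_image, 0x1234) yields
+// 0x0000000800001234; the address-space bits come from the link address and
+// the low-order 32 bits come from the offset.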
+
+
+/// Translate a section table entry
+
+XIP_STATIC void
+xipTranslateSection(SbeXipSection* o_dest, const SbeXipSection* i_src)
+{
+#ifndef _BIG_ENDIAN
+
+#if SBE_XIP_HEADER_VERSION != 8
+#error This code assumes the SBE-XIP header version 8 layout
+#endif
+
+ o_dest->iv_offset = xipRevLe32(i_src->iv_offset);
+ o_dest->iv_size = xipRevLe32(i_src->iv_size);
+ o_dest->iv_alignment = i_src->iv_alignment;
+ o_dest->iv_reserved8[0] = 0;
+ o_dest->iv_reserved8[1] = 0;
+ o_dest->iv_reserved8[2] = 0;
+#else
+ if (o_dest != i_src) {
+ *o_dest = *i_src;
+ }
+#endif /* _BIG_ENDIAN */
+}
+
+
+/// Translate a TOC entry
+
+XIP_STATIC void
+xipTranslateToc(SbeXipToc* o_dest, SbeXipToc* i_src)
+{
+#ifndef _BIG_ENDIAN
+
+#if SBE_XIP_HEADER_VERSION != 8
+#error This code assumes the SBE-XIP header version 8 layout
+#endif
+
+ o_dest->iv_id = xipRevLe32(i_src->iv_id);
+ o_dest->iv_data = xipRevLe32(i_src->iv_data);
+ o_dest->iv_type = i_src->iv_type;
+ o_dest->iv_section = i_src->iv_section;
+ o_dest->iv_elements = i_src->iv_elements;
+ o_dest->iv_pad = 0;
+#else
+ if (o_dest != i_src) {
+ *o_dest = *i_src;
+ }
+#endif /* _BIG_ENDIAN */
+}
+
+
+/// Find the final (highest-address) section of the image
+
+XIP_STATIC int
+xipFinalSection(const void* i_image, int* o_sectionId)
+{
+ int i, rc, found;
+ uint32_t offset;
+ SbeXipHeader hostHeader;
+
+ sbe_xip_translate_header(&hostHeader, (SbeXipHeader*)i_image);
+
+ found = 0;
+ offset = 0;
+ *o_sectionId = 0; /* Make GCC -O3 happy */
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+ if ((hostHeader.iv_section[i].iv_size != 0) &&
+ (hostHeader.iv_section[i].iv_offset >= offset)) {
+ *o_sectionId = i;
+ offset = hostHeader.iv_section[i].iv_offset;
+ found = 1;
+ }
+ }
+ if (!found) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR, "The image is empty\n");
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+
+/// Return a pointer to an image-format section table entry
+
+XIP_STATIC int
+xipGetSectionPointer(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection** o_imageSection)
+{
+ int rc;
+
+ if ((i_sectionId < 0) || (i_sectionId >= SBE_XIP_SECTIONS)) {
+ rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
+ } else {
+ *o_imageSection =
+ &(((SbeXipHeader*)i_image)->iv_section[i_sectionId]);
+ rc = 0;
+ }
+ return rc;
+}
+
+
+/// Restore a section table entry from host format to image format.
+
+XIP_STATIC int
+xipPutSection(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection* i_hostSection)
+{
+ int rc;
+ SbeXipSection *imageSection;
+
+ rc = xipGetSectionPointer(i_image, i_sectionId, &imageSection);
+
+ if (!rc) {
+ xipTranslateSection(imageSection, i_hostSection);
+ }
+
+ return rc;
+}
+
+
+/// Set the offset of a section
+
+XIP_STATIC int
+xipSetSectionOffset(void* io_image, const int i_section,
+ const uint32_t i_offset)
+{
+ SbeXipSection* section;
+ int rc;
+
+ rc = xipGetSectionPointer(io_image, i_section, &section);
+ if (!rc) {
+ section->iv_offset = xipRevLe32(i_offset);
+ }
+ return rc;
+}
+
+
+/// Set the size of a section
+
+XIP_STATIC int
+xipSetSectionSize(void* io_image, const int i_section, const uint32_t i_size)
+{
+ SbeXipSection* section;
+ int rc;
+
+ rc = xipGetSectionPointer(io_image, i_section, &section);
+ if (!rc) {
+ section->iv_size = xipRevLe32(i_size);
+ }
+ return rc;
+}
+
+
+/// Translate a PORE address in the image to a section and offset
+
+// We first check to be sure that the PORE address is contained in the image,
+// using the full 48-bit form. Then we scan the section table to see which
+// section contains the address - if none then the image is corrupted. We can
+// (must) use the 32-bit offset form of the address here.
+
+XIP_STATIC int
+xipPore2Section(const void* i_image,
+ const uint64_t i_poreAddress,
+ int* o_section,
+ uint32_t* o_offset)
+{
+ int rc, sectionId;
+ SbeXipSection section;
+ uint32_t addressOffset;
+
+ do {
+ rc = 0;
+
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress >
+ (xipLinkAddress(i_image) + xipImageSize(i_image)))) {
+ rc = TRACE_ERRORX(SBE_XIP_INVALID_ARGUMENT,
+ "pore2section: The i_poreAddress argument "
+ "(" F0x016llx ")\nis outside the bounds of the "
+ "image (" F0x016llx ":" F0x016llx ")\n",
+ i_poreAddress,
+ xipLinkAddress(i_image),
+ xipLinkAddress(i_image) + xipImageSize(i_image));
+ break;
+ }
+
+ addressOffset = (i_poreAddress - xipLinkAddress(i_image)) & 0xffffffff;
+
+ for (sectionId = 0; sectionId < SBE_XIP_SECTIONS; sectionId++) {
+ rc = sbe_xip_get_section(i_image, sectionId, &section);
+ if (rc) {
+ rc = TRACE_ERROR(SBE_XIP_BUG); /* Can't happen */
+ break;
+ }
+ if ((section.iv_size != 0) &&
+ (addressOffset >= section.iv_offset) &&
+ (addressOffset < (section.iv_offset + section.iv_size))) {
+ break;
+ }
+ }
+ if (rc) break;
+
+ if (sectionId == SBE_XIP_SECTIONS) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Error processing PORE address " F0x016llx ". "
+ "The address is not mapped in any section.\n"
+ "A section table dump appears below\n",
+ i_poreAddress);
+ dumpSectionTable(i_image);
+ break;
+ }
+
+ *o_section = sectionId;
+ *o_offset = addressOffset - section.iv_offset;
+
+ } while(0);
+
+ return rc;
+}
+
+
+/// Get the information required to search the TOC.
+///
+/// All return values are optional.
+
+XIP_STATIC int
+xipGetToc(void* i_image,
+ SbeXipToc** o_toc,
+ size_t* o_entries,
+ int* o_sorted,
+ char** o_strings)
+{
+ int rc;
+ SbeXipSection tocSection, stringsSection;
+
+ do {
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_TOC, &tocSection);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_STRINGS,
+ &stringsSection);
+ if (rc) break;
+
+ if (o_toc) {
+ *o_toc = (SbeXipToc*)((uint8_t*)i_image + tocSection.iv_offset);
+ }
+ if (o_entries) {
+ *o_entries = tocSection.iv_size / sizeof(SbeXipToc);
+ }
+ if (o_sorted) {
+ *o_sorted = xipSorted(i_image);
+ }
+ if (o_strings) {
+ *o_strings = (char*)i_image + stringsSection.iv_offset;
+ }
+ } while (0);
+ return rc;
+}
+
+
+/// Compare two normalized TOC entries for sorting.
+
+XIP_STATIC int
+xipCompareToc(const SbeXipToc* i_a, const SbeXipToc* i_b,
+ const char* i_strings)
+{
+ return strcmp(i_strings + xipRevLe32(i_a->iv_id),
+ i_strings + xipRevLe32(i_b->iv_id));
+}
+
+
+/// Iterative quicksort of the TOC
+
+// Note: The stack requirement is limited to 256 bytes + minor local storage.
+
+XIP_STATIC void
+xipQuickSort(SbeXipToc* io_toc, int i_left, int i_right,
+ const char* i_strings)
+{
+ int i, j, left, right, sp;
+ SbeXipToc pivot, temp;
+ uint32_t stack[64];
+
+ sp = 0;
+ stack[sp++] = i_left;
+ stack[sp++] = i_right;
+
+ while (sp) {
+
+ right = stack[--sp];
+ left = stack[--sp];
+
+ i = left;
+ j = right;
+
+ pivot = io_toc[(i + j) / 2];
+
+ while (i <= j) {
+ while (xipCompareToc(&(io_toc[i]), &pivot, i_strings) < 0) {
+ i++;
+ }
+ while (xipCompareToc(&(io_toc[j]), &pivot, i_strings) > 0) {
+ j--;
+ }
+ if (i <= j) {
+ temp = io_toc[i];
+ io_toc[i] = io_toc[j];
+ io_toc[j] = temp;
+ i++;
+ j--;
+ }
+ }
+ if (left < j) {
+ stack[sp++] = left;
+ stack[sp++] = j;
+ }
+ if (i < right) {
+ stack[sp++] = i;
+ stack[sp++] = right;
+ }
+ }
+}
+
+
+/// TOC linear search
+
+XIP_STATIC int
+xipLinearSearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
+{
+ int rc;
+ SbeXipToc *imageToc, hostToc;
+ size_t entries;
+ char* strings;
+
+ *o_entry = 0;
+ rc = xipGetToc(i_image, &imageToc, &entries, 0, &strings);
+ if (!rc) {
+ for (; entries; entries--, imageToc++) {
+ xipTranslateToc(&hostToc, imageToc);
+ if (strcmp(i_id, strings + hostToc.iv_id) == 0) {
+ break;
+ }
+ }
+ if (entries) {
+ *o_entry = imageToc;
+ rc = 0;
+ } else {
+ *o_entry = 0;
+ rc = TRACE_ERROR(SBE_XIP_ITEM_NOT_FOUND);
+ }
+ }
+ return rc;
+}
+
+
+/// A classic binary search of a (presumed) sorted array
+
+XIP_STATIC int
+xipBinarySearch(void* i_image, const char* i_id, SbeXipToc** o_entry)
+{
+ int rc;
+ SbeXipToc *imageToc;
+ size_t entries;
+ char* strings;
+ int sorted, left, right, next, sort;
+
+ do {
+ *o_entry = 0;
+
+ rc = xipGetToc(i_image, &imageToc, &entries, &sorted, &strings);
+ if (rc) break;
+
+ if (!sorted) {
+ rc = TRACE_ERROR(SBE_XIP_BUG);
+ break;
+ }
+
+ left = 0;
+ right = entries - 1;
+ while (left <= right) {
+ next = (left + right) / 2;
+ sort = strcmp(i_id, strings + xipRevLe32(imageToc[next].iv_id));
+ if (sort == 0) {
+ *o_entry = &(imageToc[next]);
+ break;
+ } else if (sort < 0) {
+ right = next - 1;
+ } else {
+ left = next + 1;
+ }
+ }
+ if (*o_entry == 0) {
+ rc = TRACE_ERROR(SBE_XIP_ITEM_NOT_FOUND);
+ break;
+ }
+ } while (0);
+ return rc;
+}
+
+
+/// Validate a TOC entry as a mapping function
+///
+/// The TOC is validated by searching for the entry, which will uncover
+/// duplicate entries or problems with sorting/searching.
+
+XIP_STATIC int
+xipValidateTocEntry(void* io_image, const SbeXipItem* i_item, void* io_arg)
+{
+ int rc;
+ SbeXipItem found;
+
+ (void)io_arg;
+
+ do {
+ rc = sbe_xip_find(io_image, i_item->iv_id, &found);
+ if (rc) {
+ rc = TRACE_ERRORX(rc, "TOC entry for %s not found\n",
+ i_item->iv_id);
+ } else if (found.iv_toc != i_item->iv_toc) {
+ rc = TRACE_ERRORX(SBE_XIP_TOC_ERROR,
+ "Duplicate TOC entry for '%s'\n", i_item->iv_id);
+ }
+ break;
+ } while (0);
+ return rc;
+}
+
+
+// This is the FNV-1a hash, used for hashing symbol names in the .fixed
+// section into 32-bit hashes for the mini-TOC.
+
+// According to the authors:
+
+// "FNV hash algorithms and source code have been released into the public
+// domain. The authors of the FNV algorithm took deliberate steps to disclose
+// the algorhtm (sic) in a public forum soon after it was invented. More than
+// a year passed after this public disclosure and the authors deliberately took
+// no steps to patent the FNV algorithm. Therefore it is safe to say that the
+// FNV authors have no patent claims on the FNV algorithm as published."
+
+#define FNV_OFFSET_BASIS 2166136261u
+#define FNV_PRIME32 16777619u
+
+static uint32_t
+xipHash32(const char* s)
+{
+ uint32_t hash;
+
+ hash = FNV_OFFSET_BASIS;
+ while (*s) {
+ hash ^= *s++;
+ hash *= FNV_PRIME32;
+ }
+ return hash;
+}
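+
+// Known FNV-1a test vectors provide a sanity check (illustrative):
+// xipHash32("") returns the offset basis 0x811c9dc5, and xipHash32("a")
+// returns 0xe40c292c.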
+
+
+// Normalize a TOC entry
+
+// Normalize the TOC entry by converting relocatable pointers into 32-bit
+// offsets from the beginning of the section containing the data. All
+// addresses in the TOC are actually 32-bit offsets in the address space named
+// in bits 16:31 of the link address of the image.
+
+XIP_STATIC int
+xipNormalizeToc(void* io_image, SbeXipToc *io_imageToc,
+ SbeXipHashedToc** io_fixedTocEntry,
+ size_t* io_fixedEntriesRemaining)
+{
+ SbeXipToc hostToc;
+ int idSection, dataSection;
+ uint32_t idOffset, dataOffset;
+ char* hostString;
+ int rc;
+
+ do {
+
+ // Translate the TOC entry to host format. Then locate the
+ // sections/offsets of the Id string (which must be in .strings) and
+ // the data.
+
+ xipTranslateToc(&hostToc, io_imageToc);
+
+ hostString =
+ (char*)xipPore2Host(io_image,
+ xipFullAddress(io_image, hostToc.iv_id));
+
+ rc = xipPore2Section(io_image,
+ xipFullAddress(io_image, hostToc.iv_id),
+ &idSection,
+ &idOffset);
+ if (rc) break;
+
+ if (idSection != SBE_XIP_SECTION_STRINGS) {
+ rc = TRACE_ERROR(SBE_XIP_IMAGE_ERROR);
+ break;
+ }
+
+ rc = xipPore2Section(io_image,
+ xipFullAddress(io_image, hostToc.iv_data),
+ &dataSection,
+ &dataOffset);
+ if (rc) break;
+
+ // Now replace the Id and data pointers with their offsets, and update
+ // the data section in the TOC entry.
+
+ hostToc.iv_id = idOffset;
+ hostToc.iv_data = dataOffset;
+ hostToc.iv_section = dataSection;
+
+ // If this TOC entry is from .fixed, create a new record in .fixed_toc
+
+ if (hostToc.iv_section == SBE_XIP_SECTION_FIXED) {
+
+ if (*io_fixedEntriesRemaining == 0) {
+ rc = TRACE_ERRORX(SBE_XIP_TOC_ERROR,
+ "Too many TOC entries for .fixed\n");
+ break;
+ }
+ if (hostToc.iv_data != (uint16_t)hostToc.iv_data) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "The .fixed section is too big to index\n");
+ break;
+ }
+
+ (*io_fixedTocEntry)->iv_hash = xipRevLe32(xipHash32(hostString));
+ (*io_fixedTocEntry)->iv_offset = xipRevLe16(hostToc.iv_data);
+ (*io_fixedTocEntry)->iv_type = hostToc.iv_type;
+ (*io_fixedTocEntry)->iv_elements = hostToc.iv_elements;
+
+ (*io_fixedTocEntry)++;
+ (*io_fixedEntriesRemaining)--;
+ }
+
+ // Finally update the TOC entry
+
+ xipTranslateToc(io_imageToc, &hostToc);
+
+ } while (0);
+
+ return rc;
+}
+
+
+// Check for hash collisions in the .fixed mini-TOC. Note that endianness is
+// not an issue here, as we're comparing for equality.
+
+XIP_STATIC int
+xipHashCollision(SbeXipHashedToc* i_fixedToc, size_t i_entries)
+{
+ int rc;
+ size_t i, j;
+
+ rc = 0;
+
+ for (i = 0; i < i_entries; i++) {
+ for (j = i + 1; j < i_entries; j++) {
+ if (i_fixedToc[i].iv_hash == i_fixedToc[j].iv_hash) {
+ rc = TRACE_ERRORX(SBE_XIP_HASH_COLLISION,
+ "Hash collision at index %d\n",
+ i);
+ break;
+ }
+ }
+ if (rc) break;
+ }
+
+ return rc;
+}
+
+
+/// Decode a normalized image-format TOC entry into a host-format SbeXipItem
+/// structure
+
+XIP_STATIC int
+xipDecodeToc(void* i_image,
+ SbeXipToc* i_imageToc,
+ SbeXipItem* o_item)
+{
+ int rc;
+ SbeXipToc hostToc;
+ SbeXipSection dataSection, stringsSection;
+
+ do {
+ if (!xipNormalized(i_image)) {
+ rc = TRACE_ERROR(SBE_XIP_NOT_NORMALIZED);
+ break;
+ }
+
+
+ // Translate the TOC entry and set the TOC pointer, data type and
+ // number of elements in the outgoing structure. The Id string is
+ // always located in the TOC_STRINGS section.
+
+ xipTranslateToc(&hostToc, i_imageToc);
+
+ o_item->iv_toc = i_imageToc;
+ o_item->iv_type = hostToc.iv_type;
+ o_item->iv_elements = hostToc.iv_elements;
+
+ sbe_xip_get_section(i_image, SBE_XIP_SECTION_STRINGS, &stringsSection);
+ o_item->iv_id =
+ (char*)i_image + stringsSection.iv_offset + hostToc.iv_id;
+
+
+ // The data (or text address) are addressed by relative offsets from
+ // the beginning of their section. The TOC entry may remain in the TOC
+ // even though the section has been removed from the image, so this
+ // case needs to be covered.
+
+ rc = sbe_xip_get_section(i_image, hostToc.iv_section, &dataSection);
+ if (rc) break;
+
+ if (dataSection.iv_size == 0) {
+ rc = TRACE_ERROR(SBE_XIP_DATA_NOT_PRESENT);
+ break;
+ }
+
+ o_item->iv_imageData =
+ (void*)((uint8_t*)i_image +
+ dataSection.iv_offset + hostToc.iv_data);
+
+ o_item->iv_address =
+ xipLinkAddress(i_image) + dataSection.iv_offset + hostToc.iv_data;
+
+ o_item->iv_partial = 0;
+
+ } while (0);
+ return rc;
+}
+
+
+/// Sort the TOC
+
+XIP_STATIC int
+xipSortToc(void* io_image)
+{
+ int rc;
+ SbeXipToc *hostToc;
+ size_t entries;
+ char* strings;
+
+ do {
+ rc = xipQuickCheck(io_image, 1);
+ if (rc) break;
+
+ if (xipSorted(io_image)) break;
+
+ rc = xipGetToc(io_image, &hostToc, &entries, 0, &strings);
+ if (rc) break;
+
+ xipQuickSort(hostToc, 0, entries - 1, strings);
+
+ ((SbeXipHeader*)io_image)->iv_tocSorted = 1;
+
+ } while (0);
+
+ return rc;
+}
+
+
+// Pad the image with 0 to a given power-of-2 alignment. The image size is
+// modified to reflect the pad, but the caller must modify the section size to
+// reflect the pad.
+
+XIP_STATIC int
+xipPadImage(void* io_image, uint32_t i_allocation,
+ uint32_t i_align, uint32_t* pad)
+{
+ int rc;
+
+ do {
+ rc = 0;
+
+ if ((i_align == 0) || ((i_align & (i_align - 1)) != 0)) {
+ rc = TRACE_ERRORX(SBE_XIP_INVALID_ARGUMENT,
+ "Alignment specification (%u) "
+ "not a power-of-2\n",
+ i_align);
+ break;
+ }
+
+ *pad = xipImageSize(io_image) % i_align;
+ if (*pad != 0) {
+ *pad = i_align - *pad;
+
+ if ((xipImageSize(io_image) + *pad) > i_allocation) {
+ rc = TRACE_ERROR(SBE_XIP_WOULD_OVERFLOW);
+ break;
+ }
+
+ memset((void*)((unsigned long)io_image + xipImageSize(io_image)),
+ 0, *pad);
+ xipSetImageSize(io_image, xipImageSize(io_image) + *pad);
+ }
+ } while (0);
+
+ return rc;
+}
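+
+// For example (illustrative values): padding an image of size 0x1234 to
+// i_align = 0x100 appends 0xcc zero bytes, sets *pad = 0xcc and leaves the
+// image size at 0x1300.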
+
+
+// Get the .fixed_toc section
+
+XIP_STATIC int
+xipGetFixedToc(void* io_image,
+ SbeXipHashedToc** o_imageToc,
+ size_t* o_entries)
+{
+ int rc;
+ SbeXipSection section;
+
+ rc = sbe_xip_get_section(io_image, SBE_XIP_SECTION_FIXED_TOC, &section);
+ if (!rc) {
+
+ *o_imageToc =
+ (SbeXipHashedToc*)((unsigned long)io_image + section.iv_offset);
+
+ *o_entries = section.iv_size / sizeof(SbeXipHashedToc);
+ }
+
+ return rc;
+}
+
+
+// Search for an item in the fixed TOC, and populate a partial TOC entry if
+// requested. This table is small and unsorted so a linear search is
+// adequate. The TOC structures are also small so all byte-reversal is done
+// 'by hand' rather than with a translate-type API.
+
+XIP_STATIC int
+xipFixedFind(void* i_image, const char* i_id, SbeXipItem* o_item)
+{
+ int rc;
+ SbeXipHashedToc* toc;
+ size_t entries;
+ uint32_t hash;
+ SbeXipSection fixedSection;
+ uint32_t offset;
+
+ do {
+ rc = xipGetFixedToc(i_image, &toc, &entries);
+ if (rc) break;
+
+ for (hash = xipRevLe32(xipHash32(i_id)); entries != 0; entries--, toc++) {
+ if (toc->iv_hash == hash) break;
+ }
+
+ if (entries == 0) {
+ rc = SBE_XIP_ITEM_NOT_FOUND;
+ break;
+ } else {
+ rc = 0;
+ }
+
+ // The caller may have requested a lookup only (o_item == 0), in which
+ // case we're done. Otherwise we create a partial SbeXipItem and
+ // populate the non-0 fields analogously to the xipDecodeToc()
+ // routine. The data resides in the .fixed section in this case.
+
+ if (o_item == 0) break;
+
+ o_item->iv_partial = 1;
+ o_item->iv_toc = 0;
+ o_item->iv_id = 0;
+
+ o_item->iv_type = toc->iv_type;
+ o_item->iv_elements = toc->iv_elements;
+
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_FIXED, &fixedSection);
+ if (rc) break;
+
+ if (fixedSection.iv_size == 0) {
+ rc = TRACE_ERROR(SBE_XIP_DATA_NOT_PRESENT);
+ break;
+ }
+
+ offset = fixedSection.iv_offset + xipRevLe16(toc->iv_offset);
+
+ o_item->iv_imageData = (void*)((uint8_t*)i_image + offset);
+ o_item->iv_address = xipLinkAddress(i_image) + offset;
+
+ } while (0);
+
+ return rc;
+}
+
+
+// Search for an item in the special built-in TOC of header fields, and
+// populate a partial TOC entry if requested.
+//
+// This facility was added to allow header data to be searched by name even
+// when the TOC has been stripped. This API will only be used in the case of a
+// stripped TOC since the header fields are also indexed in the main TOC.
+//
+// The table is allocated on the stack in order to make this code concurrently
+// patchable in PHYP (although PHYP applications will never use this code).
+// The table is small and unsorted so a linear search is adequate, and the
+// stack requirements are small.
+
+XIP_STATIC int
+xipHeaderFind(void* i_image, const char* i_id, SbeXipItem* o_item)
+{
+ int rc;
+ unsigned i;
+ uint32_t offset;
+ SbeXipSection headerSection;
+
+#define HEADER_TOC(id, field, type) \
+ {#id, offsetof(SbeXipHeader, field), type}
+
+ struct HeaderToc {
+
+ const char* iv_id;
+ uint16_t iv_offset;
+ uint8_t iv_type;
+
+ } toc[] = {
+
+ HEADER_TOC(magic, iv_magic, SBE_XIP_UINT64),
+ HEADER_TOC(entry_offset, iv_entryOffset, SBE_XIP_UINT64),
+ HEADER_TOC(link_address, iv_linkAddress, SBE_XIP_UINT64),
+
+ HEADER_TOC(image_size, iv_imageSize, SBE_XIP_UINT32),
+ HEADER_TOC(build_date, iv_buildDate, SBE_XIP_UINT32),
+ HEADER_TOC(build_time, iv_buildTime, SBE_XIP_UINT32),
+
+ HEADER_TOC(header_version, iv_headerVersion, SBE_XIP_UINT8),
+ HEADER_TOC(toc_normalized, iv_normalized, SBE_XIP_UINT8),
+ HEADER_TOC(toc_sorted, iv_tocSorted, SBE_XIP_UINT8),
+
+ HEADER_TOC(build_user, iv_buildUser, SBE_XIP_STRING),
+ HEADER_TOC(build_host, iv_buildHost, SBE_XIP_STRING),
+
+ };
+
+ do {
+
+ rc = SBE_XIP_ITEM_NOT_FOUND;
+ for (i = 0; i < (sizeof(toc) / sizeof(struct HeaderToc)); i++) {
+ if (strcmp(i_id, toc[i].iv_id) == 0) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc) break;
+
+ // The caller may have requested a lookup only (o_item == 0), in which
+ // case we're done. Otherwise we create a partial SbeXipItem and
+ // populate the non-0 fields analogously to the xipDecodeToc()
+ // routine. The data resides in the .fixed section in this case.
+
+ if (o_item == 0) break;
+
+ o_item->iv_partial = 1;
+ o_item->iv_toc = 0;
+ o_item->iv_id = 0;
+
+ o_item->iv_type = toc[i].iv_type;
+ o_item->iv_elements = 1; /* True for now... */
+
+ rc = sbe_xip_get_section(i_image, SBE_XIP_SECTION_HEADER,
+ &headerSection);
+ if (rc) break;
+
+ if (headerSection.iv_size == 0) {
+ rc = TRACE_ERROR(SBE_XIP_DATA_NOT_PRESENT);
+ break;
+ }
+
+ offset = headerSection.iv_offset + toc[i].iv_offset;
+
+ o_item->iv_imageData = (void*)((uint8_t*)i_image + offset);
+ o_item->iv_address = xipLinkAddress(i_image) + offset;
+
+ } while (0);
+
+ return rc;
+}
+
+
+////////////////////////////////////////////////////////////////////////////
+// Published API
+////////////////////////////////////////////////////////////////////////////
+
+int
+sbe_xip_validate(void* i_image, const uint32_t i_size)
+{
+ SbeXipHeader hostHeader;
+ int rc = 0, i;
+ uint32_t linkAddress, imageSize, extent, offset, size;
+ uint8_t alignment;
+
+ sbe_xip_translate_header(&hostHeader, (SbeXipHeader*)i_image);
+
+ do {
+
+ // Validate C/Assembler constraints.
+
+ if (sizeof(SbeXipSection) != SIZE_OF_SBE_XIP_SECTION) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipSection\n",
+ sizeof(SbeXipSection), SIZE_OF_SBE_XIP_SECTION);
+ break;
+ }
+
+ if (sizeof(SbeXipToc) != SIZE_OF_SBE_XIP_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipToc\n",
+ sizeof(SbeXipToc), SIZE_OF_SBE_XIP_TOC);
+ break;
+ }
+
+ if (sizeof(SbeXipHashedToc) != SIZE_OF_SBE_XIP_HASHED_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipHashedToc\n",
+ sizeof(SbeXipHashedToc),
+ SIZE_OF_SBE_XIP_HASHED_TOC);
+ break;
+ }
+
+ // Validate the image pointer and magic number
+
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ // Validate the image size
+
+ linkAddress = hostHeader.iv_linkAddress;
+ imageSize = hostHeader.iv_imageSize;
+ extent = linkAddress + imageSize;
+
+ if (imageSize < sizeof(SbeXipHeader)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) is smaller than the header size.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (imageSize != i_size) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) does not match the i_size parameter.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (extent <= linkAddress) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate(%p, %u) : "
+ "Given the link address (%u) and the "
+ "image size, the image wraps the address space\n",
+ i_image, i_size, linkAddress);
+ break;
+ }
+ if ((imageSize % SBE_XIP_FINAL_ALIGNMENT) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "sbe_xip_validate(%p, %u) : "
+ "The image size (%u) is not a multiple of %u\n",
+ i_image, i_size, imageSize,
+ SBE_XIP_FINAL_ALIGNMENT);
+ break;
+ }
+
+ // Validate that all sections appear to be within the image
+ // bounds, and are aligned correctly.
+
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+
+ offset = hostHeader.iv_section[i].iv_offset;
+ size = hostHeader.iv_section[i].iv_size;
+ alignment = hostHeader.iv_section[i].iv_alignment;
+
+ if ((offset > imageSize) ||
+ ((offset + size) > imageSize) ||
+ ((offset + size) < offset)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Section %d does not appear to be within "
+ "the bounds of the image\n"
+ "offset = %u, size = %u, image size = %u\n",
+ i, offset, size, imageSize);
+ break;
+ }
+ if ((offset % alignment) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "Section %d requires %d-byte initial "
+ "alignment but the section offset is %u\n",
+ i, alignment, offset);
+ break;
+ }
+ }
+ if (rc) break;
+
+ // If the TOC exists and the image is normalized, validate each TOC
+ // entry.
+
+ size = hostHeader.iv_section[SBE_XIP_SECTION_TOC].iv_size;
+ if (size != 0) {
+ if (xipNormalized(i_image)) {
+ rc = sbe_xip_map_toc(i_image, xipValidateTocEntry, 0);
+ if (rc) break;
+ }
+ }
+ } while (0);
+ return rc;
+}
+
+
+int
+sbe_xip_validate2(void* i_image, const uint32_t i_size, const uint32_t i_maskIgnores)
+{
+ SbeXipHeader hostHeader;
+ int rc = 0, i;
+ uint32_t linkAddress, imageSize, extent, offset, size;
+ uint8_t alignment;
+
+ sbe_xip_translate_header(&hostHeader, (SbeXipHeader*)i_image);
+
+ do {
+
+ // Validate C/Assembler constraints.
+
+ if (sizeof(SbeXipSection) != SIZE_OF_SBE_XIP_SECTION) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipSection\n",
+ sizeof(SbeXipSection), SIZE_OF_SBE_XIP_SECTION);
+ break;
+ }
+
+ if (sizeof(SbeXipToc) != SIZE_OF_SBE_XIP_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipToc\n",
+ sizeof(SbeXipToc), SIZE_OF_SBE_XIP_TOC);
+ break;
+ }
+
+ if (sizeof(SbeXipHashedToc) != SIZE_OF_SBE_XIP_HASHED_TOC) {
+ rc = TRACE_ERRORX(SBE_XIP_BUG,
+ "C/Assembler size mismatch(%d/%d) "
+ "for SbeXipHashedToc\n",
+ sizeof(SbeXipHashedToc),
+ SIZE_OF_SBE_XIP_HASHED_TOC);
+ break;
+ }
+
+ // Validate the image pointer and magic number
+
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ // Validate the image size
+
+ linkAddress = hostHeader.iv_linkAddress;
+ imageSize = hostHeader.iv_imageSize;
+ extent = linkAddress + imageSize;
+
+ if (imageSize < sizeof(SbeXipHeader)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) is smaller than the header size.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (imageSize != i_size && !(i_maskIgnores & SBE_XIP_IGNORE_FILE_SIZE)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size recorded in the image "
+ "(%u) does not match the i_size parameter.\n",
+ i_image, i_size, imageSize);
+ break;
+ }
+ if (extent <= linkAddress) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "Given the link address (%u) and the "
+ "image size, the image wraps the address space\n",
+ i_image, i_size, linkAddress);
+ break;
+ }
+ if ((imageSize % SBE_XIP_FINAL_ALIGNMENT) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "sbe_xip_validate2(%p, %u) : "
+ "The image size (%u) is not a multiple of %u\n",
+ i_image, i_size, imageSize,
+ SBE_XIP_FINAL_ALIGNMENT);
+ break;
+ }
+
+ // Validate that all sections appear to be within the image
+ // bounds, and are aligned correctly.
+
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+
+ offset = hostHeader.iv_section[i].iv_offset;
+ size = hostHeader.iv_section[i].iv_size;
+ alignment = hostHeader.iv_section[i].iv_alignment;
+
+ if ((offset > imageSize) ||
+ ((offset + size) > imageSize) ||
+ ((offset + size) < offset)) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "Section %d does not appear to be within "
+ "the bounds of the image\n"
+ "offset = %u, size = %u, image size = %u\n",
+ i, offset, size, imageSize);
+ break;
+ }
+ if ((offset % alignment) != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ALIGNMENT_ERROR,
+ "Section %d requires %d-byte initial "
+ "alignment but the section offset is %u\n",
+ i, alignment, offset);
+ break;
+ }
+ }
+ if (rc) break;
+
+ // If the TOC exists and the image is normalized, validate each TOC
+ // entry.
+
+ size = hostHeader.iv_section[SBE_XIP_SECTION_TOC].iv_size;
+ if (size != 0) {
+ if (xipNormalized(i_image)) {
+ rc = sbe_xip_map_toc(i_image, xipValidateTocEntry, 0);
+ if (rc) break;
+ }
+ }
+ } while (0);
+ return rc;
+}
+
+
+// Normalization:
+//
+// 1. Normalize the TOC, unless the image is already normalized. The image
+// must be marked as normalized before sorting.
+//
+// 2. Sort the TOC.
+//
+// 3. Clear the section offsets of any empty sections to make section-table
+// reports less confusing.
+//
+// 4. Clear normalization status on any failure.
+
+int
+sbe_xip_normalize(void* io_image)
+{
+ int rc, i;
+ SbeXipSection section;
+ SbeXipToc* imageToc;
+ SbeXipHashedToc* fixedImageToc;
+ SbeXipHashedToc* fixedTocEntry;
+ size_t tocEntries, fixedTocEntries, fixedEntriesRemaining;
+
+ do {
+ rc = xipQuickCheck(io_image, 0);
+ if (rc) break;
+
+ if (!xipNormalized(io_image)) {
+
+ rc = xipGetToc(io_image, &imageToc, &tocEntries, 0, 0);
+ if (rc) break;
+
+ rc = xipGetFixedToc(io_image, &fixedImageToc, &fixedTocEntries);
+ if (rc) break;
+
+ fixedTocEntry = fixedImageToc;
+ fixedEntriesRemaining = fixedTocEntries;
+
+ for (; tocEntries--; imageToc++) {
+ rc = xipNormalizeToc(io_image, imageToc,
+ &fixedTocEntry, &fixedEntriesRemaining);
+ if (rc) break;
+
+ }
+ if (rc) break;
+
+ if (fixedEntriesRemaining != 0) {
+ rc = TRACE_ERRORX(SBE_XIP_TOC_ERROR,
+ "Not enough TOC entries for .fixed\n");
+ break;
+ }
+
+ rc = xipHashCollision(fixedImageToc, fixedTocEntries);
+ if (rc) break;
+
+ ((SbeXipHeader*)io_image)->iv_normalized = 1;
+ }
+
+ rc = xipSortToc(io_image);
+ if (rc) break;
+
+ for (i = 0; i < SBE_XIP_SECTIONS; i++) {
+ rc = sbe_xip_get_section(io_image, i, &section);
+ if (rc) break;
+ if (section.iv_size == 0) {
+ xipSetSectionOffset(io_image, i, 0);
+ }
+ }
+ if (rc) break;
+
+ } while(0);
+
+ ((SbeXipHeader*)io_image)->iv_normalized = (rc == 0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_image_size(void* io_image, uint32_t* o_size)
+{
+ int rc;
+
+ rc = xipQuickCheck(io_image, 0);
+ if (!rc) {
+ *o_size = xipImageSize(io_image);
+ }
+ return rc;
+}
+
+
+int
+sbe_xip_get_section(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection* o_hostSection)
+{
+ int rc;
+ SbeXipSection *imageSection;
+
+ rc = xipGetSectionPointer(i_image, i_sectionId, &imageSection);
+
+ if (!rc) {
+ xipTranslateSection(o_hostSection, imageSection);
+ }
+
+ return rc;
+}
+
+
+// If the 'big' TOC is not present, search the mini-TOCs that only index the
+// .fixed and .header sections.
+
+int
+sbe_xip_find(void* i_image,
+ const char* i_id,
+ SbeXipItem* o_item)
+{
+ int rc;
+ SbeXipToc* toc;
+ SbeXipItem item, *pitem;
+ SbeXipSection* tocSection;
+
+ do {
+ rc = xipQuickCheck(i_image, 1);
+ if (rc) break;
+
+ rc = xipGetSectionPointer(i_image, SBE_XIP_SECTION_TOC, &tocSection);
+ if (rc) break;
+
+ if (tocSection->iv_size == 0) {
+ rc = xipFixedFind(i_image, i_id, o_item);
+ if (rc) {
+ rc = xipHeaderFind(i_image, i_id, o_item);
+ }
+ break;
+ }
+
+ if (xipSorted(i_image)) {
+ rc = xipBinarySearch(i_image, i_id, &toc);
+ } else {
+ rc = xipLinearSearch(i_image, i_id, &toc);
+ }
+ if (rc) break;
+
+ if (o_item) {
+ pitem = o_item;
+ } else {
+ pitem = &item;
+ }
+ rc = xipDecodeToc(i_image, toc, pitem);
+ if (rc) break;
+
+ } while (0);
+
+ return rc;
+}
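+
+// Note that o_item may be passed as 0 when the caller only needs to know
+// whether an id can be looked up (illustrative; "my_symbol" is a
+// hypothetical TOC id):
+//
+//     if (sbe_xip_find(image, "my_symbol", 0) == 0) {
+//         /* The id is indexed by this image */
+//     }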
+
+
+int
+sbe_xip_map_halt(void* io_image,
+ int (*i_fn)(void* io_image,
+ const uint64_t i_poreAddress,
+ const char* i_rcString,
+ void* io_arg),
+ void* io_arg)
+{
+ int rc;
+ SbeXipSection haltSection;
+ SbeXipHalt *halt;
+ uint32_t size;
+ uint32_t actualSize;
+
+ do {
+ rc = xipQuickCheck(io_image, 0);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(io_image, SBE_XIP_SECTION_HALT, &haltSection);
+ if (rc) break;
+
+ halt = (SbeXipHalt*)((unsigned long)io_image + haltSection.iv_offset);
+ size = haltSection.iv_size;
+
+ while (size) {
+
+ rc = i_fn(io_image,
+ xipRevLe64(halt->iv_address),
+ halt->iv_string,
+ io_arg);
+ if (rc) break;
+
+            // The SbeXipHalt structure claims a 4-character string. The
+            // computation below computes the actual record size based on the
+            // actual length of the string, including the 0-byte termination
+            // and rounding up to a multiple of 4. For example, a 3-character
+            // string occupies 4 bytes with its terminator, so its record is
+            // 8 + 4 = 12 bytes.
+
+ actualSize = 8 + (((strlen(halt->iv_string) + 4) / 4) * 4);
+
+ if (size < actualSize) {
+ rc = TRACE_ERRORX(SBE_XIP_IMAGE_ERROR,
+ "The .halt section is improperly formed\n");
+ break;
+ }
+
+ size -= actualSize;
+ halt = (SbeXipHalt*)((unsigned long)halt + actualSize);
+ }
+
+ if (rc) break;
+
+ } while (0);
+
+ return rc;
+}
+
+
+typedef struct {
+ uint64_t iv_address;
+ const char* iv_string;
+} GetHaltStruct;
+
+
+XIP_STATIC int
+xipGetHaltMap(void* io_image,
+ const uint64_t i_poreAddress,
+ const char* i_rcString,
+ void* io_arg)
+{
+ int rc;
+ GetHaltStruct* s = (GetHaltStruct*)io_arg;
+
+ (void)io_image;
+
+ if (i_poreAddress == s->iv_address) {
+ s->iv_string = i_rcString;
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+
+int
+sbe_xip_get_halt(void* io_image,
+ const uint64_t i_poreAddress,
+ const char** o_rcString)
+{
+ int rc;
+ GetHaltStruct s;
+
+ s.iv_address = i_poreAddress;
+ do {
+ rc = xipQuickCheck(io_image, 0);
+ if (rc) break;
+
+ rc = sbe_xip_map_halt(io_image, xipGetHaltMap, &s);
+ if (rc == 0) {
+ rc = TRACE_ERRORX(SBE_XIP_ITEM_NOT_FOUND,
+ "sbe_xip_get_halt: No HALT code is associated "
+ "with address " F0x012llx "\n", i_poreAddress);
+ } else if (rc < 0) {
+ *o_rcString = s.iv_string;
+ rc = 0;
+ }
+ } while (0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_get_scalar(void *i_image, const char* i_id, uint64_t* o_data)
+{
+ int rc;
+ SbeXipItem item;
+
+ rc = sbe_xip_find(i_image, i_id, &item);
+ if (!rc) {
+ switch (item.iv_type) {
+ case SBE_XIP_UINT8:
+ *o_data = *((uint8_t*)(item.iv_imageData));
+ break;
+ case SBE_XIP_UINT32:
+ *o_data = xipRevLe32(*((uint32_t*)(item.iv_imageData)));
+ break;
+ case SBE_XIP_UINT64:
+ *o_data = xipRevLe64(*((uint64_t*)(item.iv_imageData)));
+ break;
+ case SBE_XIP_ADDRESS:
+ *o_data = item.iv_address;
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ }
+ return rc;
+}
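+
+// A minimal end-to-end read sketch (illustrative; 'image' and 'size' are
+// assumed to describe an SBE-XIP image already loaded into host memory):
+//
+//     uint64_t entry;
+//
+//     if ((sbe_xip_validate(image, size) == 0) &&
+//         (sbe_xip_normalize(image) == 0) &&
+//         (sbe_xip_get_scalar(image, "entry_offset", &entry) == 0)) {
+//         /* 'entry' now holds the image entry offset */
+//     }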
+
+
+int
+sbe_xip_get_element(void *i_image,
+ const char* i_id,
+ const uint32_t i_index,
+ uint64_t* o_data)
+{
+ int rc;
+ SbeXipItem item;
+
+ do {
+ rc = sbe_xip_find(i_image, i_id, &item);
+ if (rc) break;
+
+ if ((item.iv_elements != 0) && (i_index >= item.iv_elements)) {
+ rc = TRACE_ERROR(SBE_XIP_BOUNDS_ERROR);
+ break;
+ }
+
+ switch (item.iv_type) {
+ case SBE_XIP_UINT8:
+ *o_data = ((uint8_t*)(item.iv_imageData))[i_index];
+ break;
+ case SBE_XIP_UINT32:
+ *o_data = xipRevLe32(((uint32_t*)(item.iv_imageData))[i_index]);
+ break;
+ case SBE_XIP_UINT64:
+ *o_data = xipRevLe64(((uint64_t*)(item.iv_imageData))[i_index]);
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ if (rc) break;
+
+ } while (0);
+ return rc;
+}
+
+
+int
+sbe_xip_get_string(void *i_image, const char* i_id, char** o_data)
+{
+ int rc;
+ SbeXipItem item;
+
+ rc = sbe_xip_find(i_image, i_id, &item);
+ if (!rc) {
+ switch (item.iv_type) {
+ case SBE_XIP_STRING:
+ *o_data = (char*)(item.iv_imageData);
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ }
+ return rc;
+}
+
+
+int
+sbe_xip_read_uint64(const void *i_image,
+ const uint64_t i_poreAddress,
+ uint64_t* o_data)
+{
+ int rc;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ rc = xipValidatePoreAddress(i_image, i_poreAddress, 8);
+ if (rc) break;
+
+ if (i_poreAddress % 8) {
+ rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
+ break;
+ }
+
+ *o_data =
+ xipRevLe64(*((uint64_t*)xipPore2Host(i_image, i_poreAddress)));
+
+ } while(0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_set_scalar(void* io_image, const char* i_id, const uint64_t i_data)
+{
+ int rc;
+ SbeXipItem item;
+
+ rc = sbe_xip_find(io_image, i_id, &item);
+ if (!rc) {
+ switch(item.iv_type) {
+ case SBE_XIP_UINT8:
+ *((uint8_t*)(item.iv_imageData)) = (uint8_t)i_data;
+ break;
+ case SBE_XIP_UINT32:
+ *((uint32_t*)(item.iv_imageData)) = xipRevLe32((uint32_t)i_data);
+ break;
+ case SBE_XIP_UINT64:
+ *((uint64_t*)(item.iv_imageData)) = xipRevLe64((uint64_t)i_data);
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ }
+ return rc;
+}
+
+
+int
+sbe_xip_set_element(void *i_image,
+ const char* i_id,
+ const uint32_t i_index,
+ const uint64_t i_data)
+{
+ int rc;
+ SbeXipItem item;
+
+ do {
+ rc = sbe_xip_find(i_image, i_id, &item);
+ if (rc) break;
+
+ if ((item.iv_elements != 0) && (i_index >= item.iv_elements)) {
+ rc = TRACE_ERROR(SBE_XIP_BOUNDS_ERROR);
+ break;
+ }
+
+ switch (item.iv_type) {
+ case SBE_XIP_UINT8:
+ ((uint8_t*)(item.iv_imageData))[i_index] = (uint8_t)i_data;
+ break;
+ case SBE_XIP_UINT32:
+ ((uint32_t*)(item.iv_imageData))[i_index] =
+ xipRevLe32((uint32_t)i_data);
+ break;
+ case SBE_XIP_UINT64:
+ ((uint64_t*)(item.iv_imageData))[i_index] =
+ xipRevLe64((uint64_t)i_data);
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ if (rc) break;
+
+ } while (0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_set_string(void *i_image, const char* i_id, const char* i_data)
+{
+ int rc;
+ SbeXipItem item;
+ char* dest;
+
+ rc = sbe_xip_find(i_image, i_id, &item);
+ if (!rc) {
+ switch (item.iv_type) {
+ case SBE_XIP_STRING:
+ dest = (char*)(item.iv_imageData);
+ if (strlen(dest) < strlen(i_data)) {
+ memcpy(dest, i_data, strlen(dest));
+ } else {
+ strcpy(dest, i_data);
+ }
+ break;
+ default:
+ rc = TRACE_ERROR(SBE_XIP_TYPE_ERROR);
+ break;
+ }
+ }
+ return rc;
+}
+
+
+int
+sbe_xip_write_uint64(void *io_image,
+ const uint64_t i_poreAddress,
+ const uint64_t i_data)
+{
+ int rc;
+
+ do {
+ rc = xipQuickCheck(io_image, 0);
+ if (rc) break;
+
+ rc = xipValidatePoreAddress(io_image, i_poreAddress, 8);
+ if (rc) break;
+
+ if (i_poreAddress % 8) {
+ rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
+ break;
+ }
+
+ *((uint64_t*)xipPore2Host(io_image, i_poreAddress)) =
+ xipRevLe64(i_data);
+
+ } while(0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_delete_section(void* io_image, const int i_sectionId)
+{
+ int rc, final;
+ SbeXipSection section;
+
+ do {
+ rc = xipQuickCheck(io_image, 1);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(io_image, i_sectionId, &section);
+ if (rc) break;
+
+
+ // Deleting an empty section is a NOP. Otherwise the section must be
+ // the final section of the image. Update the sizes and re-establish
+ // the final image alignment.
+
+ if (section.iv_size == 0) break;
+
+ rc = xipFinalSection(io_image, &final);
+ if (rc) break;
+
+ if (final != i_sectionId) {
+ rc = TRACE_ERRORX(SBE_XIP_SECTION_ERROR,
+ "Attempt to delete non-final section %d\n",
+ i_sectionId);
+ break;
+ }
+
+ xipSetSectionOffset(io_image, i_sectionId, 0);
+ xipSetSectionSize(io_image, i_sectionId, 0);
+
+
+ // For cleanliness we also remove any alignment padding that had been
+ // appended between the now-last section and the deleted section, then
+ // re-establish the final alignment. The assumption is that all images
+ // always have the correct final alignment, so there is no way this
+ // could overflow a designated buffer space since the image size is
+ // the same or has been reduced.
+
+ rc = xipFinalSection(io_image, &final);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(io_image, final, &section);
+ if (rc) break;
+
+ xipSetImageSize(io_image, section.iv_offset + section.iv_size);
+ xipFinalAlignment(io_image);
+
+ } while (0);
+
+ return rc;
+}
+
+
+#ifndef PPC_HYP
+
+// This API is not needed by PHYP procedures, and is elided since PHYP does
+// not support malloc().
+
+int
+sbe_xip_duplicate_section(const void* i_image,
+ const int i_sectionId,
+ void** o_duplicate,
+ uint32_t* o_size)
+{
+ SbeXipSection section;
+ int rc;
+
+ *o_duplicate = 0;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(i_image, i_sectionId, &section);
+ if (rc) break;
+
+ if (section.iv_size == 0) {
+ rc = TRACE_ERRORX(SBE_XIP_SECTION_ERROR,
+ "Attempt to duplicate empty section %d\n",
+ i_sectionId);
+ break;
+ }
+
+ *o_duplicate = malloc(section.iv_size);
+ *o_size = section.iv_size;
+
+ if (*o_duplicate == 0) {
+ rc = TRACE_ERROR(SBE_XIP_NO_MEMORY);
+ break;
+ }
+
+ memcpy(*o_duplicate,
+ xipHostAddressFromOffset(i_image, section.iv_offset),
+ section.iv_size);
+
+
+ } while (0);
+
+ if (rc) {
+ free(*o_duplicate);
+ *o_duplicate = 0;
+ *o_size = 0;
+ }
+
+ return rc;
+}
+
+#endif // PPC_HYP
+
+
+// The append must be done in such a way that if the append fails, the image
+// is not modified. This behavior is required by applications that
+// speculatively append until the allocation fails, but still require the
+// final image to be valid. To accomplish this the initial image size and
+// section statistics are captured at entry, and restored in the event of an
+// error.
+
+int
+sbe_xip_append(void* io_image,
+ const int i_sectionId,
+ const void* i_data,
+ const uint32_t i_size,
+ const uint32_t i_allocation,
+ uint32_t* o_sectionOffset)
+{
+ SbeXipSection section, initialSection;
+ int rc, final, restoreOnError;
+ void* hostAddress;
+ uint32_t pad, initialSize;
+
+ do {
+ restoreOnError = 0;
+
+ rc = xipQuickCheck(io_image, 1);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(io_image, i_sectionId, &section);
+ if (rc) break;
+
+ if (i_size == 0) break;
+
+ initialSection = section;
+ initialSize = xipImageSize(io_image);
+ restoreOnError = 1;
+
+ if (section.iv_size == 0) {
+
+ // The section is empty, and now becomes the final section. Pad
+ // the image to the specified section alignment. Note that the
+ // size of the previously final section does not change.
+
+ rc = xipPadImage(io_image, i_allocation, section.iv_alignment,
+ &pad);
+ if (rc) break;
+ section.iv_offset = xipImageSize(io_image);
+
+ } else {
+
+ // Otherwise, the section must be the final section in order to
+ // continue. Remove any padding from the image.
+
+ rc = xipFinalSection(io_image, &final);
+ if (rc) break;
+
+ if (final != i_sectionId) {
+ rc = TRACE_ERRORX(SBE_XIP_SECTION_ERROR,
+ "Attempt to append to non-final section "
+ "%d\n", i_sectionId);
+ break;
+ }
+ xipSetImageSize(io_image, section.iv_offset + section.iv_size);
+ }
+
+
+ // Make sure the allocated space won't overflow. Set the return
+ // parameter o_sectionOffset and copy the new data into the image (or
+ // simply clear the space).
+
+ if ((xipImageSize(io_image) + i_size) > i_allocation) {
+ rc = TRACE_ERROR(SBE_XIP_WOULD_OVERFLOW);
+ break;
+ }
+ if (o_sectionOffset != 0) {
+ *o_sectionOffset = section.iv_size;
+ }
+
+ hostAddress =
+ xipHostAddressFromOffset(io_image, xipImageSize(io_image));
+ if (i_data == 0) {
+ memset(hostAddress, 0, i_size);
+ } else {
+ memcpy(hostAddress, i_data, i_size);
+ }
+
+
+ // Update the image size and section table. Note that the final
+ // alignment may push out of the allocation.
+
+ xipSetImageSize(io_image, xipImageSize(io_image) + i_size);
+ xipFinalAlignment(io_image);
+
+ if (xipImageSize(io_image) > i_allocation) {
+ rc = TRACE_ERROR(SBE_XIP_WOULD_OVERFLOW);
+ break;
+ }
+
+ section.iv_size += i_size;
+
+ if (xipPutSection(io_image, i_sectionId, &section) != 0) {
+ rc = TRACE_ERROR(SBE_XIP_BUG); /* Can't happen */
+ break;
+ }
+
+
+ // Special case
+
+ if (i_sectionId == SBE_XIP_SECTION_TOC) {
+ ((SbeXipHeader*)io_image)->iv_tocSorted = 0;
+ }
+
+ } while (0);
+
+ if (rc && restoreOnError) {
+ if (xipPutSection(io_image, i_sectionId, &initialSection) != 0) {
+ rc = TRACE_ERROR(SBE_XIP_BUG); /* Can't happen */
+ }
+ xipSetImageSize(io_image, initialSize);
+ }
+
+ return rc;
+}
+
+
+int
+sbe_xip_section2pore(const void* i_image,
+ const int i_sectionId,
+ const uint32_t i_offset,
+ uint64_t* o_poreAddress)
+{
+ int rc;
+ SbeXipSection section;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ rc = sbe_xip_get_section(i_image, i_sectionId, &section);
+ if (rc) break;
+
+ if (section.iv_size == 0) {
+ rc = TRACE_ERROR(SBE_XIP_SECTION_ERROR);
+ break;
+ }
+
+        // The offset must lie within the section itself, not merely
+        // within the image.
+        if (i_offset >= section.iv_size) {
+ rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
+ break;
+ }
+
+ *o_poreAddress = xipLinkAddress(i_image) + section.iv_offset + i_offset;
+
+ if (*o_poreAddress % 4) {
+ rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
+ break;
+ }
+
+ } while(0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_pore2section(const void* i_image,
+                     const uint64_t i_poreAddress,
+                     int* o_section,
+                     uint32_t* o_offset)
+{
+ int rc;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+        rc = xipPore2Section(i_image, i_poreAddress, o_section, o_offset);
+
+ } while(0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_pore2host(const void* i_image,
+ const uint64_t i_poreAddress,
+ void** o_hostAddress)
+{
+ int rc;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ if ((i_poreAddress < xipLinkAddress(i_image)) ||
+ (i_poreAddress >
+ (xipLinkAddress(i_image) + xipImageSize(i_image)))) {
+ rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
+ break;
+ }
+
+ *o_hostAddress =
+ xipHostAddressFromOffset(i_image,
+ i_poreAddress - xipLinkAddress(i_image));
+ } while(0);
+
+ return rc;
+}
+
+
+int
+sbe_xip_host2pore(const void* i_image,
+ void* i_hostAddress,
+ uint64_t* o_poreAddress)
+{
+ int rc;
+
+ do {
+ rc = xipQuickCheck(i_image, 0);
+ if (rc) break;
+
+ if ((i_hostAddress < i_image) ||
+ (i_hostAddress >
+ xipHostAddressFromOffset(i_image, xipImageSize(i_image)))) {
+ rc = TRACE_ERROR(SBE_XIP_INVALID_ARGUMENT);
+ break;
+ }
+
+ *o_poreAddress = xipLinkAddress(i_image) +
+ ((unsigned long)i_hostAddress - (unsigned long)i_image);
+ if (*o_poreAddress % 4) {
+ rc = TRACE_ERROR(SBE_XIP_ALIGNMENT_ERROR);
+ break;
+ }
+ } while(0);
+
+ return rc;
+}
+
+
+void
+sbe_xip_translate_header(SbeXipHeader* o_dest, const SbeXipHeader* i_src)
+{
+#ifndef _BIG_ENDIAN
+ int i;
+ SbeXipSection* destSection;
+ const SbeXipSection* srcSection;
+
+#if SBE_XIP_HEADER_VERSION != 8
+#error This code assumes the SBE-XIP header version 8 layout
+#endif
+
+ o_dest->iv_magic = xipRevLe64(i_src->iv_magic);
+ o_dest->iv_entryOffset = xipRevLe64(i_src->iv_entryOffset);
+ o_dest->iv_linkAddress = xipRevLe64(i_src->iv_linkAddress);
+
+ for (i = 0; i < 5; i++) {
+ o_dest->iv_reserved64[i] = 0;
+ }
+
+ for (i = 0, destSection = o_dest->iv_section,
+ srcSection = i_src->iv_section;
+ i < SBE_XIP_SECTIONS;
+ i++, destSection++, srcSection++) {
+ xipTranslateSection(destSection, srcSection);
+ }
+
+ o_dest->iv_imageSize = xipRevLe32(i_src->iv_imageSize);
+ o_dest->iv_buildDate = xipRevLe32(i_src->iv_buildDate);
+ o_dest->iv_buildTime = xipRevLe32(i_src->iv_buildTime);
+
+ for (i = 0; i < 5; i++) {
+ o_dest->iv_reserved32[i] = 0;
+ }
+
+ o_dest->iv_headerVersion = i_src->iv_headerVersion;
+ o_dest->iv_normalized = i_src->iv_normalized;
+ o_dest->iv_tocSorted = i_src->iv_tocSorted;
+
+    for (i = 0; i < 5; i++) {
+ o_dest->iv_reserved8[i] = 0;
+ }
+
+ memcpy(o_dest->iv_buildUser, i_src->iv_buildUser,
+ sizeof(i_src->iv_buildUser));
+ memcpy(o_dest->iv_buildHost, i_src->iv_buildHost,
+ sizeof(i_src->iv_buildHost));
+ memcpy(o_dest->iv_reservedChar, i_src->iv_reservedChar,
+ sizeof(i_src->iv_reservedChar));
+
+#else
+ if (o_dest != i_src) {
+ *o_dest = *i_src;
+ }
+#endif /* _BIG_ENDIAN */
+}
+
+
+int
+sbe_xip_map_toc(void* io_image,
+ int (*i_fn)(void* io_image,
+ const SbeXipItem* i_item,
+ void* io_arg),
+ void* io_arg)
+{
+ int rc;
+ SbeXipToc *imageToc;
+ SbeXipItem item;
+ size_t entries;
+
+ do {
+ rc = xipQuickCheck(io_image, 0);
+ if (rc) break;
+
+ rc = xipGetToc(io_image, &imageToc, &entries, 0, 0);
+ if (rc) break;
+
+ for (; entries--; imageToc++) {
+ rc = xipDecodeToc(io_image, imageToc, &item);
+ if (rc) break;
+ rc = i_fn(io_image, &item, io_arg);
+ if (rc) break;
+ }
+ } while(0);
+
+ return rc;
+}
diff --git a/libpore/sbe_xip_image.h b/libpore/sbe_xip_image.h
new file mode 100644
index 0000000..8ba55b4
--- /dev/null
+++ b/libpore/sbe_xip_image.h
@@ -0,0 +1,1784 @@
+/* IBM_PROLOG_BEGIN_TAG */
+/* This is an automatically generated prolog. */
+/* */
+/* $Source: src/usr/hwpf/hwp/build_winkle_images/p8_slw_build/sbe_xip_image.h $ */
+/* */
+/* OpenPOWER HostBoot Project */
+/* */
+/* COPYRIGHT International Business Machines Corp. 2012,2014 */
+/* */
+/* Licensed under the Apache License, Version 2.0 (the "License"); */
+/* you may not use this file except in compliance with the License. */
+/* You may obtain a copy of the License at */
+/* */
+/* http://www.apache.org/licenses/LICENSE-2.0 */
+/* */
+/* Unless required by applicable law or agreed to in writing, software */
+/* distributed under the License is distributed on an "AS IS" BASIS, */
+/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
+/* implied. See the License for the specific language governing */
+/* permissions and limitations under the License. */
+/* */
+/* IBM_PROLOG_END_TAG */
+#ifndef __SBE_XIP_IMAGE_H
+#define __SBE_XIP_IMAGE_H
+
+// $Id: sbe_xip_image.h,v 1.24 2013/06/13 20:26:33 bcbrock Exp $
+// $Source: /afs/awd/projects/eclipz/KnowledgeBase/.cvsroot/eclipz/chips/p8/working/procedures/ipl/sbe/sbe_xip_image.h,v $
+//-----------------------------------------------------------------------------
+// *! (C) Copyright International Business Machines Corp. 2011
+// *! All Rights Reserved -- Property of IBM
+// *! *** IBM Confidential ***
+//-----------------------------------------------------------------------------
+// *! OWNER NAME: Bishop Brock Email: bcbrock@us.ibm.com
+//------------------------------------------------------------------------------
+
+/// \file sbe_xip_image.h
+/// \brief Everything related to creating and manipulating SBE-XIP binary
+/// images.
+
+#include "fapi_sbe_common.H"
+
+/// Current version (fields, layout, sections) of the SBE_XIP header
+///
+/// If any changes are made to this file or to sbe_xip_header.H, please update
+/// the header version and follow up on all of the error messages.
+
+#define SBE_XIP_HEADER_VERSION 8
+
+/// \defgroup sbe_xip_magic_numbers SBE-XIP magic numbers
+///
+/// An SBE-XIP magic number is a 64-bit constant. The 4 high-order bytes
+/// contain the ASCII characters "XIP " and identify the image as an SBE-XIP
+/// image, while the 4 low-order bytes identify the type of the image.
+///
+/// @{
+
+#define SBE_XIP_MAGIC 0x58495020 // "XIP "
+#define SBE_BASE_MAGIC ULL(0x5849502042415345) // "XIP BASE"
+#define SBE_SEEPROM_MAGIC ULL(0x584950205345504d) // "XIP SEPM"
+#define SBE_CENTAUR_MAGIC ULL(0x58495020434e5452) // "XIP CNTR"
+
+/// @}
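+
+/// As an illustrative sketch (not part of the API), a host tool can screen
+/// a candidate buffer for the 32-bit magic before attempting full
+/// validation; the byte-by-byte compare sidesteps host endianness (here
+/// 'buf' and 'size' are hypothetical caller variables):
+///
+/// \code
+/// int looksLikeXip(const uint8_t* buf, const uint32_t size)
+/// {
+///     return (size >= 4) &&
+///         (buf[0] == 0x58) && (buf[1] == 0x49) &&   // 'X', 'I'
+///         (buf[2] == 0x50) && (buf[3] == 0x20);     // 'P', ' '
+/// }
+/// \endcode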
+
+
+/// \defgroup sbe_xip_sections SBE-XIP Image Section Indexes
+///
+/// These constants define the order that the SbeXipSection structures appear
+/// in the header, which is not necessarily the order the sections appear in
+/// the binary image. Given that SBE-XIP image contents are tightly
+/// controlled, we use this simple indexing scheme for the allowed sections
+/// rather than a more general approach, e.g., allowing arbitrary sections
+/// identified by their names.
+///
+/// @{
+
+// -*- DO NOT REORDER OR EDIT THIS SET OF CONSTANTS WITHOUT ALSO EDITING -*-
+// -*- THE ASSEMBLER LAYOUT IN sbe_xip_header.H. -*-
+
+#define SBE_XIP_SECTION_HEADER 0
+#define SBE_XIP_SECTION_FIXED 1
+#define SBE_XIP_SECTION_FIXED_TOC 2
+#define SBE_XIP_SECTION_IPL_TEXT 3
+#define SBE_XIP_SECTION_IPL_DATA 4
+#define SBE_XIP_SECTION_TEXT 5
+#define SBE_XIP_SECTION_DATA 6
+#define SBE_XIP_SECTION_TOC 7
+#define SBE_XIP_SECTION_STRINGS 8
+#define SBE_XIP_SECTION_HALT 9
+#define SBE_XIP_SECTION_PIBMEM0 10
+#define SBE_XIP_SECTION_DCRINGS 11
+#define SBE_XIP_SECTION_RINGS 12
+#define SBE_XIP_SECTION_SLW 13
+#define SBE_XIP_SECTION_FIT 14
+#define SBE_XIP_SECTION_FFDC 15
+
+#define SBE_XIP_SECTIONS 16
+
+/// @}
+
+
+/// \defgroup sbe_xip_validate() ignore masks.
+///
+/// These defines, when matched in sbe_xip_validate(), cause the validation
+/// to skip the check of the corresponding property. The purpose is to more
+/// effectively debug images that may be damaged and which have excess info
+/// before or after the image. The latter will be the case when dumping the
+/// image as a memory block without knowing where the image starts and ends.
+///
+/// @{
+
+#define SBE_XIP_IGNORE_FILE_SIZE (uint32_t)0x00000001
+#define SBE_XIP_IGNORE_ALL (uint32_t)0x80000000
+
+/// @}
+
+
+#ifndef __ASSEMBLER__
+
+/// Applications can expand this macro to create an array of section names.
+#define SBE_XIP_SECTION_NAMES(var) \
+ const char* var[] = { \
+ ".header", \
+ ".fixed", \
+ ".fixed_toc", \
+ ".ipl_text", \
+ ".ipl_data", \
+ ".text", \
+ ".data", \
+ ".toc", \
+ ".strings", \
+ ".halt", \
+ ".pibmem0", \
+ ".dcrings", \
+ ".rings", \
+ ".slw", \
+ ".fit", \
+ ".ffdc", \
+ }
+
+/// Applications can use this macro to safely index the array of section
+/// names.
+#define SBE_XIP_SECTION_NAME(var, n) \
+    ((((n) < 0) || ((n) >= (int)(sizeof(var) / sizeof(char*)))) ?      \
+ "Bug : Invalid SBE-XIP section name" : var[n])
+
+
+#endif /* __ASSEMBLER__ */
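+
+/// For example (an illustrative sketch only), the two macros above combine
+/// to print a section name without risking an out-of-bounds index:
+///
+/// \code
+/// SBE_XIP_SECTION_NAMES(sectionNames);
+///
+/// void printSectionName(const int i)
+/// {
+///     printf("%s\n", SBE_XIP_SECTION_NAME(sectionNames, i));
+/// }
+/// \endcode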
+
+
+/// Maximum section alignment for SBE-XIP sections
+#define SBE_XIP_MAX_SECTION_ALIGNMENT 128
+
+/// \defgroup sbe_xip_toc_types SBE-XIP Table of Contents data types
+///
+/// These are the data types stored in the \a iv_type field of the SbeXipToc
+/// objects. These must be defined as manifest constants because they are
+/// required to be recognized as manifest constants in C (as opposed to C++)
+/// code.
+///
+/// NB: The 0x0 code is purposefully left undefined to catch bugs.
+///
+/// @{
+
+/// Data is a single unsigned byte
+#define SBE_XIP_UINT8 0x01
+
+/// Data is a 32-bit unsigned integer
+#define SBE_XIP_UINT32 0x02
+
+/// Data is a 64-bit unsigned integer
+#define SBE_XIP_UINT64 0x03
+
+/// Data is a 0-byte terminated ASCII string
+#define SBE_XIP_STRING 0x04
+
+/// Data is an address
+#define SBE_XIP_ADDRESS 0x05
+
+/// The maximum type number
+#define SBE_XIP_MAX_TYPE_INDEX 0x05
+
+/// Applications can expand this macro to get access to string forms of the
+/// SBE-XIP data types if desired.
+#define SBE_XIP_TYPE_STRINGS(var) \
+ const char* var[] = { \
+ "Illegal 0 Code", \
+ "SBE_XIP_UINT8", \
+ "SBE_XIP_UINT32", \
+ "SBE_XIP_UINT64", \
+ "SBE_XIP_STRING", \
+ "SBE_XIP_ADDRESS", \
+ }
+
+/// Applications can expand this macro to get access to abbreviated string
+/// forms of the SBE-XIP data types if desired.
+#define SBE_XIP_TYPE_ABBREVS(var) \
+ const char* var[] = { \
+ "Illegal 0 Code", \
+ "u8 ", \
+ "u32", \
+ "u64", \
+ "str", \
+ "adr", \
+ }
+
+/// Applications can use this macro to safely index either array of SBE-XIP
+/// type strings.
+#define SBE_XIP_TYPE_STRING(var, n) \
+    (((n) >= (sizeof(var) / sizeof(char*))) ?                          \
+ "Invalid SBE-XIP type specification" : var[n])
+
+/// @}
+
+
+/// Final alignment constraint for SBE-XIP images.
+///
+/// PORE images are required to be multiples of 8 bytes in length, to
+/// guarantee that the PoreVe will be able to complete any 8-byte load/store.
+#define SBE_XIP_FINAL_ALIGNMENT 8
+
+
+////////////////////////////////////////////////////////////////////////////
+// C Definitions
+////////////////////////////////////////////////////////////////////////////
+
+#ifndef __ASSEMBLER__
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#if 0
+} /* So __cplusplus doesn't mess w/auto-indent */
+#endif
+
+/// SBE-XIP Section information
+///
+/// This structure defines the data layout of section table entries in the
+/// SBE-XIP image header.
+
+// -*- DO NOT REORDER OR EDIT THIS STRUCTURE DEFINITION WITHOUT ALSO -*-
+// -*- EDITING THE ASSEMBLER LAYOUT IN sbe_xip_header.H -*-
+
+typedef struct {
+
+ /// The offset (in bytes) of the section from the beginning of the image
+ ///
+ /// In normalized images the section offset will always be 0 if the
+ /// section size is also 0.
+ uint32_t iv_offset;
+
+ /// The size of the section in bytes, exclusive of alignment padding
+ ///
+ /// This is the size of the program-significant data in the section,
+ /// exclusive of any alignment padding or reserved or extra space. The
+ /// alignment padding (reserved space) is not represented explicitly, but
+ /// is only implied by the offset of any subsequent non-empty section, or
+ /// in the case of the final section in the image, the image size.
+ ///
+ /// Regardless of the \a iv_offset, if the \a iv_size of a section is 0 it
+ /// should be considered "not present" in the image. In normalized images
+ /// the section offset will always be 0 if the section size is also 0.
+ uint32_t iv_size;
+
+ /// The required initial alignment for the section offset
+ ///
+ /// The PORE and the applications using SBE-XIP images have strict
+ /// alignment/padding requirements. The PORE does not handle any type of
+ /// unaligned instruction or data fetches. Some sections and subsections
+ /// must also be POWER cache-line aligned. The \a iv_alignment applies to
+ /// the first byte of the section. PORE images are also required to be
+    /// multiples of 8 bytes in length, to guarantee that the PoreVe will be
+ /// able to complete any 8-byte load/store. These constraints are checked
+ /// by sbe_xip_validate() and enforced by sbe_xip_append(). The alignment
+ /// constraints may force a section to be padded, which may create "holes"
+ /// in the image as explained in the comments for the \a iv_size field.
+ ///
+ /// Note that alignment constraints are always checked relative to the
+ /// first byte of the image for in-memory images, not relative to the host
+ /// address. Alignment specifications are required to be a power-of-2.
+ uint8_t iv_alignment;
+
+ /// Reserved structure alignment padding; Pad to 12 bytes
+ uint8_t iv_reserved8[3];
+
+} SbeXipSection;
+
+/// The SbeXipSection structure is created by assembler code and is expected
+/// to have the same size in C code. This constraint is checked in
+/// sbe_xip_validate().
+#define SIZE_OF_SBE_XIP_SECTION 12
+
+
+/// SBE-XIP binary image header
+///
+/// This header occupies the initial bytes of an SBE-XIP binary image.
+/// The header contents are documented here, however the structure is actually
+/// defined in the file sbe_xip_header.S, and these two definitions must be
+/// kept consistent.
+///
+/// The header is a fixed-format representation of the most critical
+/// information about the image. The large majority of information about the
+/// image and its contents are available through the searchable table of
+/// contents. PORE code itself normally accesses the data directly through
+/// global symbols.
+///
+/// The header only contains information 1) required by OTPROM code (e.g., the
+/// entry point); 2) required by search and updating APIs (e.g., the
+/// locations and sizes of all of the sections); and 3) a few pieces of
+/// critical meta-data (e.g., information about the image build process).
+///
+/// Any entries that are accessed by PORE code are required to be 64 bits, and
+/// will appear at the beginning of the header.
+///
+/// The header also contains bytewise offsets and sizes of all of the sections
+/// that are assembled to complete the image. The offsets are relative to the
+/// start of the image (where the header is loaded). The sizes include any
+/// padding inserted by the link editor to guarantee section alignment.
+///
+/// Every field of the header is also accessible through the searchable table
+/// of contents as documented in sbe_xip_header.S.
+
+// -*- DO NOT REORDER OR EDIT THIS STRUCTURE DEFINITION WITHOUT ALSO -*-
+// -*- EDITING THE ASSEMBLER LAYOUT IN sbe_xip_header.S, AND WITHOUT -*-
+// -*- UPDATING THE sbe_xip_translate_header() API IN sbe_xip_image.c. -*-
+
+typedef struct {
+
+ //////////////////////////////////////////////////////////////////////
+ // Identification - 8-byte aligned; 8 entries
+ //////////////////////////////////////////////////////////////////////
+
+ /// Contains SBE_XIP_MAGIC to identify an SBE-XIP image
+ uint64_t iv_magic;
+
+ /// The offset of the SBE-XIP entry point from the start of the image
+ uint64_t iv_entryOffset;
+
+ /// The base address used to link the image, as a full relocatable PORE
+ /// address
+ uint64_t iv_linkAddress;
+
+ /// Reserved for future expansion
+ uint64_t iv_reserved64[5];
+
+ //////////////////////////////////////////////////////////////////////
+ // Section Table - 4-byte aligned; 16 entries
+ //////////////////////////////////////////////////////////////////////
+
+ SbeXipSection iv_section[SBE_XIP_SECTIONS];
+
+ //////////////////////////////////////////////////////////////////////
+ // Other information - 4-byte aligned; 8 entries
+ //////////////////////////////////////////////////////////////////////
+
+ /// The size of the image (including padding) in bytes
+ uint32_t iv_imageSize;
+
+ /// Build date generated by `date +%Y%m%d`, e.g., 20110630
+ uint32_t iv_buildDate;
+
+ /// Build time generated by `date +%H%M`, e.g., 0756
+ uint32_t iv_buildTime;
+
+ /// Reserved for future expansion
+ uint32_t iv_reserved32[5];
+
+ //////////////////////////////////////////////////////////////////////
+ // Other Information - 1-byte aligned; 8 entries
+ //////////////////////////////////////////////////////////////////////
+
+ /// Header format version number
+ uint8_t iv_headerVersion;
+
+ /// Indicates whether the image has been normalized (0/1)
+ uint8_t iv_normalized;
+
+ /// Indicates whether the TOC has been sorted to speed searching (0/1)
+ uint8_t iv_tocSorted;
+
+ /// Reserved for future expansion
+ uint8_t iv_reserved8[5];
+
+ //////////////////////////////////////////////////////////////////////
+ // Strings; 64 characters allocated
+ //////////////////////////////////////////////////////////////////////
+
+ /// Build user, generated by `id -un`
+ char iv_buildUser[16];
+
+ /// Build host, generated by `hostname`
+ char iv_buildHost[24];
+
+ /// Reserved for future expansion
+ char iv_reservedChar[24];
+
+} SbeXipHeader;
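+
+/// As an illustrative sketch (not part of the API), a host tool can copy
+/// the header into host-endian form with sbe_xip_translate_header()
+/// (declared later in this file) and then read any field directly:
+///
+/// \code
+/// void printBuildStamp(const void* i_image)
+/// {
+///     SbeXipHeader header;
+///
+///     sbe_xip_translate_header(&header, (const SbeXipHeader*)i_image);
+///     printf("Built %u %04u by %.16s@%.24s\n",
+///            header.iv_buildDate, header.iv_buildTime,
+///            header.iv_buildUser, header.iv_buildHost);
+/// }
+/// \endcode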
+
+
+/// A C-structure form of the SBE-XIP Table of Contents (TOC) entries
+///
+/// The .toc section consists entirely of an array of these structures.
+/// TOC entries are never accessed by PORE code.
+///
+/// These structures store indexing information for global data required to be
+/// manipulated by external tools. The actual data is usually allocated in a
+/// data section and manipulated by the SBE code using global or local symbol
+/// names. Each TOC entry contains a pointer to a keyword string naming the
+/// data, the address of the data (or the data itself), the data type,
+/// meta-information about the data, and for vectors the vector size.
+
+// -*- DO NOT REORDER OR EDIT THIS STRUCTURE DEFINITION WITHOUT ALSO -*-
+// -*- EDITING THE ASSEMBLER MACROS (BELOW) THAT CREATE THE TABLE OF -*-
+// -*- CONTENTS ENTRIES. -*-
+
+typedef struct {
+
+ /// A pointer to a 0-byte terminated ASCII string identifying the data.
+ ///
+ /// When allocated by the .xip_toc macro this is a pointer to the string
+ /// form of the symbol name for the global or local symbol associated with
+ /// the data which is allocated in the .strings section. This pointer is
+ /// not aligned.
+ ///
+ /// When the image is normalized this pointer is replaced by the offset of
+ /// the string in the .strings section.
+ uint32_t iv_id;
+
+ /// A 32-bit pointer locating the data
+ ///
+ /// This field is initially populated by the link editor. For scalar,
+ /// vector and string types this is the final relocated address of the
+ /// first byte of the data. For address types, this is the relocated
+ /// address. When the image is normalized, these addresses are converted
+ /// into the equivalent offsets from the beginning of the section holding
+ /// the data.
+ uint32_t iv_data;
+
+ /// The type of the data; See \ref sbe_xip_toc_types.
+ uint8_t iv_type;
+
+ /// The section containing the data; See \ref sbe_xip_sections.
+ uint8_t iv_section;
+
+ /// The number of elements for vector types, otherwise 1 for scalar types
+ /// and addresses.
+ ///
+ /// Vectors are naturally limited in size, e.g. to the number of cores,
+ /// chips in a node, DD-levels etc. If \a iv_elements is 0 then no bounds
+ /// checking is done on get/set accesses of the data.
+ uint8_t iv_elements;
+
+ /// Structure alignment padding; Pad to 12 bytes
+ uint8_t iv_pad;
+
+} SbeXipToc;
+
+/// The SbeXipToc structure is created by assembler code and is expected
+/// to have the same size in C code. This constraint is checked in
+/// sbe_xip_validate().
+#define SIZE_OF_SBE_XIP_TOC 12
+
+
+/// A C-structure form of hashed SBE-XIP Table of Contents (TOC) entries
+///
+/// This structure was introduced in order to allow a small TOC for the .fixed
+/// section to support minimum-sized SEEPROM images in which the global TOC
+/// and all strings have been stripped out. In this structure the index
+/// string has been replaced by a 32-bit hash, and there is no longer a record
+/// of the original data name other than the hash. The section of the data is
+/// assumed to be .fixed, with a maximum 16-bit offset.
+///
+/// These structures are created when entries are made in the .fixed section.
+/// They are created empty, then filled in during image normalization.
+///
+/// This structure allows the sbe_xip_get*() and sbe_xip_set*() APIs to work
+/// even on highly-stripped SEEPROM images.
+
+typedef struct {
+
+ /// A 32-bit hash (FNV-1a) of the Id string.
+ uint32_t iv_hash;
+
+ /// The offset in bytes from the start of the (implied) section of the data
+ uint16_t iv_offset;
+
+ /// The type of the data; See \ref sbe_xip_toc_types.
+ uint8_t iv_type;
+
+ /// The number of elements for vector types, otherwise 1 for scalar types
+ /// and addresses.
+ ///
+ /// Vectors are naturally limited in size, e.g. to the number of cores,
+ /// chips in a node, DD-levels etc. If \a iv_elements is 0 then no bounds
+ /// checking is done on get/set accesses of the data.
+ uint8_t iv_elements;
+
+} SbeXipHashedToc;
+
+/// The SbeXipHashedToc structure is created by assembler code and is expected
+/// to have the same size in C code. This constraint is checked in
+/// sbe_xip_validate().
+#define SIZE_OF_SBE_XIP_HASHED_TOC 8
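+
+/// For reference, a minimal sketch of a 32-bit FNV-1a hash, using the
+/// standard offset basis and prime; this is illustrative only, and any
+/// application hashing Id strings must match the exact variant used by the
+/// image build tools:
+///
+/// \code
+/// uint32_t hashString32(const char* s)
+/// {
+///     uint32_t hash = 2166136261u;        // FNV-1a offset basis
+///
+///     for (; *s != 0; s++) {
+///         hash ^= (uint8_t)*s;            // XOR in the next byte...
+///         hash *= 16777619u;              // ...then multiply by the prime
+///     }
+///     return hash;
+/// }
+/// \endcode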
+
+
+/// A decoded TOC entry for use by applications
+///
+/// This structure is a decoded form of a normalized TOC entry, filled in by
+/// the sbe_xip_decode_toc() and sbe_xip_find() APIs. This structure is
+/// always returned with data elements in host-endian format.
+///
+/// In the event that the TOC has been removed from the image, this structure
+/// will also be returned by sbe_xip_find() with information populated from
+/// the .fixed_toc section if possible. In this case the field \a iv_partial
+/// will be set and only the fields \a iv_address, \a iv_imageData, \a iv_type
+/// and \a iv_elements will be populated (all other fields will be set to 0).
+///
+/// \note Only special-purpose applications will ever need to use this
+/// structure given that the higher-level APIs sbe_xip_get_*() and
+/// sbe_xip_set_*() are provided and should be used if possible, especially
+/// given that the information may be truncated as described above.
+
+typedef struct {
+
+ /// A pointer to the associated TOC entry as it exists in the image
+ ///
+ /// If \a iv_partial is set this field is returned as 0.
+ SbeXipToc* iv_toc;
+
+ /// The full relocatable PORE address
+ ///
+ /// All relocatable addresses are computed from the \a iv_linkAddress
+ /// stored in the header. For scalar and string data, this is the
+ /// relocatable address of the data. For address-only entries, this is
+ /// the indexed address itself.
+ uint64_t iv_address;
+
+ /// A host pointer to the first byte of text or data within the image
+ ///
+ /// For scalar or string types this is a host pointer to the first byte of
+/// the data. For code pointers (addresses) this is a host pointer to the
+ /// first byte of code. Note that any use of this field requires the
+ /// caller to handle conversion of the data to host endian-ness if
+ /// required. Only 8-bit and string data can be used directly on all
+ /// hosts.
+ void* iv_imageData;
+
+ /// The item name
+ ///
+ /// This is a pointer in host memory to a string that names the TOC entry
+ /// requested. This field is set to a pointer to the ID string of the TOC
+ /// entry inside the image. If \a iv_partial is set this field is returned
+ /// as 0.
+ char* iv_id;
+
+ /// The data type, one of the SBE_XIP_* constants
+ uint8_t iv_type;
+
+ /// The number of elements in a vector
+ ///
+ /// This field is set from the TOC entry when the TOC entry is
+ /// decoded. This value is stored as 1 for scalar declarations, and may be
+ /// set to 0 for vectors with large or undeclared sizes. Otherwise it is
+ /// used to bounds check indexed accesses.
+ uint8_t iv_elements;
+
+ /// Is this record only partially populated?
+ ///
+ /// This field is set to 0 normally, and only set to 1 if a lookup is made
+ /// in an image that only has the fixed TOC and the requested Id hashes to
+ /// the fixed TOC.
+ uint8_t iv_partial;
+
+} SbeXipItem;
+
+
+/// Prototype entry in the .halt section
+///
+/// The .halt section is generated by the 'reqhalt' macro. This structure
+/// associates the address of each halt with the string form of the FAPI
+/// return code associated with the halt. The string form is used because the
+/// FAPI error return code is not constant. The .halt section is 4-byte
+/// aligned, and each address/string entry is always padded to a multiple of 4
+/// bytes.
+///
+/// In the .halt section the \a iv_string may be any length, thus the size of
+/// each actual record is variable (although guaranteed to always be a
+/// multiple of 4 bytes). Although the C compiler might naturally align
+/// instances of this structure on a 64-bit boundary, the APIs that allow
+/// access to the .halt section assume that the underlying machine can do
+/// non-aligned loads from a pointer to this structure.
+
+typedef struct {
+
+ /// The 64-bit relocatable address of the halt
+ ///
+ /// This is the address found in the PC (Status Register bits 16:63) when
+ /// the PORE halts. The full 64-bit form is used rather than the simple
+ /// 32-bit offset to support merging SEEPROM and PIBMEM .halt sections in
+ /// the SEEPROM IPL images.
+ uint64_t iv_address;
+
+ /// A C-prototype for a variable-length 0-terminated ASCII string
+ ///
+ /// This is a prototype only to simplify C programming. The actual string
+ /// may be any length.
+ char iv_string[4];
+
+} SbeXipHalt;
+
+
+/// Validate an SBE-XIP image
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory.
+///
+/// \param[in] i_size The putative size of the image
+///
+/// \param[in] i_maskIgnores Mask of ignore bits selecting which properties
+/// sbe_xip_validate2() should not check.
+///
+/// This API should be called first by all applications that manipulate
+/// SBE-XIP images in host memory. The magic number is validated, and
+/// the image is checked for consistency of the section table and table of
+/// contents. The \a iv_imageSize field of the header must also match the
+/// provided \a i_size parameter. Validation does not modify the image.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_validate(void* i_image, const uint32_t i_size);
+
+int
+sbe_xip_validate2(void* i_image, const uint32_t i_size, const uint32_t i_maskIgnores);
+
+
+/// Normalize the SBE-XIP image
+///
+/// \param[in] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// SBE-XIP images must be normalized before any other APIs are allowed to
+/// operate on the image. Since normalization modifies the image, an explicit
+/// call to normalize the image is required. Briefly, normalization modifies
+/// the TOC entries created by the final link to simplify search, updates,
+/// modification and relocation of the image. Normalization is explained in
+/// the written documentation of the SBE-XIP binary format. Normalization does
+/// not modify the size of the image.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_normalize(void* io_image);
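+
+/// A typical entry sequence for a host tool is sketched below ('image' and
+/// 'size' are hypothetical caller variables):
+///
+/// \code
+/// int rc;
+///
+/// rc = sbe_xip_validate(image, size);
+/// if (rc == 0) {
+///     rc = sbe_xip_normalize(image);
+/// }
+/// \endcode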
+
+
+/// Return the size of an SBE-XIP image from the image header
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[out] o_size A pointer to a variable returned as the size of the
+/// image in bytes, as recorded in the image header.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_image_size(void* i_image, uint32_t* o_size);
+
+
+/// Locate a section table entry and translate into host format
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory.
+///
+/// \param[in] i_sectionId Identifies the section to be queried. See \ref
+/// sbe_xip_sections.
+///
+/// \param[out] o_hostSection Updated to contain the section table entry
+/// translated to host byte order.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_get_section(const void* i_image,
+ const int i_sectionId,
+ SbeXipSection* o_hostSection);
+
+
+/// Endian translation of an SbeXipHeader object
+///
+/// \param[out] o_hostHeader The destination object.
+///
+/// \param[in] i_imageHeader The source object.
+///
+/// Translation of an SbeXipHeader includes translation of all data members,
+/// including translation of the embedded section table. This translation
+/// works even if \a o_hostHeader == \a i_imageHeader, i.e., in the
+/// destructive case.
+void
+sbe_xip_translate_header(SbeXipHeader* o_hostHeader,
+ const SbeXipHeader* i_imageHeader);
+
+
+/// Get scalar data from an SBE-XIP image
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// requested.
+///
+/// \param[out] o_data A pointer to an 8-byte integer to receive the scalar
+/// data. Assuming the item is located this variable is assigned by the call.
+/// In the event of an error the final state of \a o_data is not specified.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the item named
+/// \a i_id, assigning \a o_data from the image if the item is found and is a
+/// scalar value. Scalar values include 8-, 32-, and 64-bit integers and PORE
+/// addresses. Image data smaller than 64 bits are extracted as unsigned
+/// types, and it is the caller's responsibility to cast or convert the
+/// returned data as appropriate.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_get_scalar(void *i_image, const char* i_id, uint64_t* o_data);
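+
+/// For example, a minimal sketch reading a scalar from a normalized image
+/// ('image' is a hypothetical host pointer and "my_item" a hypothetical
+/// item name):
+///
+/// \code
+/// uint64_t data;
+///
+/// if (sbe_xip_get_scalar(image, "my_item", &data) == 0) {
+///     // 'data' now holds the host-endian value of "my_item"
+/// }
+/// \endcode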
+
+
+/// Get an integral element from a vector held in an SBE-XIP image
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// requested.
+///
+/// \param[in] i_index The index of the vector element to return.
+///
+/// \param[out] o_data A pointer to an 8-byte integer to receive the
+/// data. Assuming the item is located this variable is assigned by the call.
+/// In the event of an error the final state of \a o_data is not specified.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the \a i_index
+/// element of the item named \a i_id, assigning \a o_data from the image if
+/// the item is found, is a vector of an integral type, and the \a i_index is
+/// in bounds. Vector elements smaller than 64 bits are extracted as unsigned
+/// types, and it is the caller's responsibility to cast or convert the
+/// returned data as appropriate.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_get_element(void *i_image,
+ const char* i_id,
+ const uint32_t i_index,
+ uint64_t* o_data);
+
+
+/// Get string data from an SBE-XIP image
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// requested.
+///
+/// \param[out] o_data A pointer to a character pointer. Assuming the
+/// item is located this variable is assigned by the call to point to the
+/// string as it exists in the \a i_image. In the event of an error the final
+/// state of \a o_data is not specified.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the item named
+/// \a i_id, assigning \a o_data if the item is found and is a string. It is
+/// the caller's responsibility to copy the string from the \a i_image memory
+/// space if necessary.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_get_string(void *i_image, const char* i_id, char** o_data);
+
+
+/// Directly read 64-bit data from the image based on a PORE address
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[in] i_poreAddress A relocatable PORE address contained in the
+/// image, presumably of an 8-byte data area. The \a i_poreAddress is
+/// required to be 8-byte aligned, otherwise the SBE_XIP_ALIGNMENT_ERROR code
+/// is returned.
+///
+/// \param[out] o_data The 64-bit data in host format that was found at \a
+/// i_poreAddress.
+///
+/// This API is provided for applications that need to manipulate SBE-XIP
+/// images in terms of their relocatable PORE addresses. The API checks that
+/// the \a i_poreAddress is properly aligned and contained in the image, then
+/// reads the contents of \a i_poreAddress into \a o_data, performing
+/// image-to-host endianness conversion if required.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_read_uint64(const void *i_image,
+ const uint64_t i_poreAddress,
+ uint64_t* o_data);
+
+
+/// Set scalar data in an SBE-XIP image
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory.
+/// The image is assumed to be consistent with the information contained in
+/// the header regarding the presence of and sizes of all sections. The image
+/// is also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// to be modified.
+///
+/// \param[in] i_data The new scalar data.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the item named
+/// by \a i_id, updating the image from \a i_data if the item is found, has
+/// a scalar type and can be modified. For this API the scalar types include
+/// 8-, 32-, and 64-bit integers. Although PORE addresses are considered a
+/// scalar type for sbe_xip_get_scalar(), PORE addresses cannot be modified
+/// by this API. The caller is responsible for ensuring that the \a i_data is
+/// of the correct size for the underlying data element in the image.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_set_scalar(void* io_image, const char* i_id, const uint64_t i_data);
+
+
+/// Set an integral element in a vector held in an SBE-XIP image
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// to be updated.
+///
+/// \param[in] i_index The index of the vector element to update.
+///
+/// \param[in] i_data The new vector element.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the \a i_index
+/// element of the item named \a i_id, updating the image from \a i_data if the
+/// item is found, is a vector of an integral type, and the \a i_index is in
+/// bounds. The caller is responsible for ensuring that the \a i_data is of
+/// the correct size for the underlying data element in the image.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_set_element(void *i_image,
+ const char* i_id,
+ const uint32_t i_index,
+ const uint64_t i_data);
+
+
+/// Set string data in an SBE-XIP image
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A pointer to a 0-terminated ASCII string naming the item
+/// to be modified.
+///
+/// \param[in] i_data A pointer to the new string data.
+///
+/// This API searches the SBE-XIP Table of Contents (TOC) for the item named
+/// \a i_id, which must be a string variable. If found, then the string data
+/// in the image is overwritten with \a i_data. Strings are held 0-terminated
+/// in the image, and the SBE-XIP format does not maintain a record of the
+/// amount of memory allocated for an individual string. If a string is
+/// overwritten by a shorter string then the 'excess' storage is effectively
+/// lost. If the length of \a i_data is longer than the current strlen() of
+/// the string data then \a i_data is silently truncated to the first
+/// strlen(old_string) characters.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_set_string(void *io_image, const char* i_id, const char* i_data);
+
+
+/// Directly write 64-bit data into the image based on a PORE address
+///
+/// \param[in, out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[in] i_poreAddress A relocatable PORE address contained in the
+/// image, presumably of an 8-byte data area. The \a i_poreAddress is
+/// required to be 8-byte aligned, otherwise the SBE_XIP_ALIGNMENT_ERROR code
+/// is returned.
+///
+/// \param[in] i_data The 64-bit data in host format to be written to \a
+/// i_poreAddress.
+///
+/// This API is provided for applications that need to manipulate SBE-XIP
+/// images in terms of their relocatable PORE addresses. The API checks that
+/// the \a i_poreAddress is properly aligned and contained in the image, then
+/// updates the contents of \a i_poreAddress with \a i_data, performing
+/// host-to-image endianness conversion if required.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_write_uint64(void *io_image,
+ const uint64_t i_poreAddress,
+ const uint64_t i_data);
+
+
+/// Map over an SBE-XIP image Table of Contents
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_fn A pointer to a function to call on each TOC entry. The
+/// function has the prototype:
+///
+/// \code
+/// int (*i_fn)(void* io_image,
+/// const SbeXipItem* i_item,
+/// void* io_arg)
+/// \endcode
+///
+/// \param[in,out] io_arg The private argument of \a i_fn.
+///
+/// This API iterates over each entry of the TOC, calling \a i_fn with
+/// pointers to the image, an SbeXipItem* pointer, and a private argument. The
+/// iteration terminates either when all TOC entries have been mapped, or \a
+/// i_fn returns a non-zero code.
+///
+/// \retval 0 Success; All TOC entries were mapped, including the case that
+/// the .toc section is empty.
+///
+/// \retval non-0 May be either one of the SBE-XIP image error codes (see \ref
+/// sbe_xip_image_errors), or a non-zero code from \a i_fn. Since the standard
+/// SBE_XIP return codes are > 0, application-defined codes should be < 0.
+int
+sbe_xip_map_toc(void* io_image,
+ int (*i_fn)(void* io_image,
+ const SbeXipItem* i_item,
+ void* io_arg),
+ void* io_arg);
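+
+/// For example, an illustrative callback that prints the Id of every TOC
+/// entry and never aborts the iteration:
+///
+/// \code
+/// static int
+/// printId(void* io_image, const SbeXipItem* i_item, void* io_arg)
+/// {
+///     printf("%s\n", i_item->iv_id);
+///     return 0;
+/// }
+///
+/// // ... rc = sbe_xip_map_toc(image, printId, 0);
+/// \endcode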
+
+
+/// Find an SBE-XIP TOC entry
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_id A 0-byte terminated ASCII string naming the item to be
+/// searched for.
+///
+/// \param[out] o_item If the search is successful, then the object
+/// pointed to by \a o_item is filled in with the decoded form of the
+/// TOC entry for \a i_id. If the API returns a non-0 error code then the
+/// final state of the storage at \a o_item is undefined. This parameter may
+/// be supplied as 0, in which case sbe_xip_find() serves as a simple predicate
+/// on whether an item is indexed in the TOC.
+///
+/// This API searches the TOC of a normalized SBE-XIP image for the item named
+/// \a i_id, and if found, fills in the structure pointed to by \a
+/// o_item with a decoded form of the TOC entry. If the item is not found,
+/// the following two return codes may be considered non-error codes:
+///
+/// - SBE_XIP_ITEM_NOT_FOUND : No TOC record for \a i_id was found.
+///
+/// - SBE_XIP_DATA_NOT_PRESENT : The item appears in the TOC, however the
+/// section containing the data is no longer present in the image.
+///
+/// If the TOC section has been deleted from the image, then the search is
+/// restricted to the abbreviated TOC that indexes data in the .fixed section.
+/// In this case the \a o_item structure is marked with a 1 in the \a
+/// iv_partial field since the abbreviated TOC can not populate the entire
+/// SbeXipItem structure.
+///
+/// \note This API should typically only be used as a predicate, not as a way
+/// to access the image via the returned SbeXipItem structure. To obtain data
+/// from the image or update data in the image use the sbe_xip_get_*() and
+/// sbe_xip_set_*() APIs respectively.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_find(void* i_image,
+ const char* i_id,
+ SbeXipItem* o_item);
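+
+/// For example, used purely as a predicate (a minimal sketch; "my_item" is
+/// a hypothetical item name):
+///
+/// \code
+/// if (sbe_xip_find(image, "my_item", 0) == 0) {
+///     // "my_item" is indexed in the TOC (or in the fixed TOC)
+/// }
+/// \endcode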
+
+
+/// Map over an SBE-XIP image .halt section
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[in] i_fn A pointer to a function to call on each entry in .halt.
+/// The function has the prototype:
+///
+/// \code
+/// int (*i_fn)(void* io_image,
+/// const uint64_t i_poreAddress,
+/// const char* i_rcString,
+/// void* io_arg)
+///
+/// \endcode
+///
+/// \param[in,out] io_arg The private argument of \a i_fn.
+///
+/// This API iterates over each entry of the .halt section, calling \a i_fn
+/// with each HALT address, the string form of the return code associated with
+/// that HALT address, and a private argument. The iteration terminates either
+/// when all .halt entries have been mapped, or \a i_fn returns a non-zero
+/// code. The \a i_poreAddress passed to \a i_fn is the full 48-bit
+/// relocatable PORE address.
+///
+/// \retval 0 Success, including the case that the image has no .halt section.
+///
+/// \retval non-0 May be either one of the SBE-XIP image error codes (see \ref
+/// sbe_xip_image_errors), or any non-zero code from \a i_fn. Since the
+/// standard SBE_XIP return codes are \> 0, application-defined codes should
+/// be \< 0.
+int
+sbe_xip_map_halt(void* io_image,
+ int (*i_fn)(void* io_image,
+ const uint64_t i_poreAddress,
+ const char* i_rcString,
+ void* io_arg),
+ void* io_arg);
+
+
+/// Get the string form of a HALT code from an SBE-XIP image .halt section
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[in] i_poreAddress This is the 48-bit address found in the PC when
+/// the PORE halts. This address is actually 4 bytes beyond the actual HALT
+/// instruction, however for simplicity this is the address used to index the
+/// HALT.
+///
+/// \param[out] o_rcString The caller provides the address of a string-pointer
+/// variable which is updated with a pointer to the string form of the halt
+/// code associated with \a i_poreAddress (assuming a successful completion).
+///
+/// \retval 0 Success
+///
+/// \retval SBE_XIP_ITEM_NOT_FOUND The \a i_poreAddress is not associated
+/// with a halt code in .halt.
+///
+/// \retval Other See \ref sbe_xip_image_errors
+int
+sbe_xip_get_halt(void* io_image,
+ const uint64_t i_poreAddress,
+ const char** o_rcString);
+
+
+/// Delete a section from an SBE-XIP image in host memory
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_sectionId Identifies the section to be deleted. See \ref
+/// sbe_xip_sections.
+///
+/// This API effectively deletes a section from an SBE-XIP image held in host
+/// memory. Unless the requested section \a i_sectionId is already empty, only
+/// the final (highest address offset) section of the image may be deleted.
+/// Deleting the final section of the image means that the section size is set
+/// to 0, and the size of the image recorded in the header is reduced by the
+/// section size. Any alignment padding of the now-last section is also
+/// removed.
+///
+/// \note This API does not check for or warn if other sections in the image
+/// reference the deleted section.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_delete_section(void* io_image, const int i_sectionId);
+
+
+#ifndef PPC_HYP
+
+/// Duplicate a section from an SBE-XIP image in host memory
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections.
+///
+/// \param[in] i_sectionId Identifies the section to be duplicated. See \ref
+/// sbe_xip_sections.
+///
+/// \param[out] o_duplicate At exit, points to the newly allocated and
+/// initialized duplicate of the given section. The caller is responsible for
+/// free()-ing this memory when no longer required.
+///
+/// \param[out] o_size At exit, contains the size (in bytes) of the duplicated
+/// section.
+///
+/// This API creates a bytewise duplicate of a non-empty section into newly
+/// malloc()-ed memory. At exit \a o_duplicate points to the duplicate, and \a
+/// o_size is set to the size of the duplicated section. The caller is
+/// responsible for free()-ing the memory when no longer required. The
+/// pointer at \a o_duplicate is set to NULL (0) and the \a o_size is set to 0
+/// in the event of any failure.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_duplicate_section(const void* i_image,
+ const int i_sectionId,
+ void** o_duplicate,
+ uint32_t* o_size);
+
+#endif // PPC_HYP
+
+
+/// Append binary data to an SBE-XIP image held in host memory
+///
+/// \param[in,out] io_image A pointer to an SBE-XIP image in host memory. The
+/// image is assumed to be consistent with the information contained in the
+/// header regarding the presence of and sizes of all sections. The image is
+/// also required to have been normalized.
+///
+/// \param[in] i_sectionId Identifies the section to contain the new data.
+///
+/// \param[in] i_data A pointer to the data to be appended to the image. If
+/// this pointer is NULL (0), then the effect is as if \a i_data were a
+/// pointer to an \a i_size array of 0 bytes.
+///
+/// \param[in] i_size The size of the data to be appended in bytes. If \a
+/// i_data is 0, then this is the number of bytes to clear.
+///
+/// \param[in] i_allocation The size of the memory region containing the
+/// image, measured from the first byte of the image. The call will fail if
+/// appending the new data plus any alignment padding would overflow the
+/// allocated memory.
+///
+/// \param[out] o_sectionOffset If non-0 at entry, then the API updates the
+/// location pointed to by \a o_sectionOffset with the offset of the first
+/// byte of the appended data within the indicated section. This return value
+/// is invalid in the event of a non-0 return code.
+///
+/// This API copies data from \a i_data to the end of the indicated section.
+/// The section \a i_sectionId must either be empty, or must be the
+/// final (highest address) section in the image. If the section is initially
+/// empty and \a i_size is non-0 then the section is created at the end of the
+/// image. The size of the section and the size of the image are always
+/// adjusted to reflect the newly added data. This is a simple binary copy
+/// without any interpretation (e.g., endian-translation) of the copied data.
+/// The caller is responsible for ensuring that the host memory area
+/// containing the SBE-XIP image is large enough to hold the newly appended
+/// data without causing addressing errors or buffer overrun errors.
+///
+/// The final parameter \a o_sectionOffset is optional, and may be passed as
+/// NULL (0) if the application does not require the information. This return
+/// value is provided to simplify typical use cases of this API:
+///
+/// - A scan program is appended to the image, or a run-time data area is
+/// allocated and cleared at the end of the image.
+///
+/// - Pointer variables in the image are updated with PORE addresses obtained
+/// via sbe_xip_section2pore(), or
+/// other procedure code initializes a newly allocated and cleared data area
+/// via host addresses obtained from sbe_xip_section2host().
+///
+/// Regarding alignment, note that the SBE-XIP format requires that sections
+/// maintain an initial alignment that varies by section, and the API will
+/// enforce these alignment constraints for all sections created by the API.
+/// All alignment is relative to the first byte of the image (\a io_image) -
+/// \e not to the current in-memory address of the image. By specification
+/// SBE-XIP images must be loaded at a 4K alignment in order for PORE hardware
+/// relocation to work, however the APIs don't require this 4K alignment for
+/// in-memory manipulation of images. Images to be executed on PoreVe will
+/// normally require at least 8-byte final alignment in order to guarantee that
+/// the PoreVe can execute an 8-byte fetch or load/store of the final
+/// doubleword.
+///
+/// \note If the TOC section is modified then the image is marked as having an
+/// unsorted TOC.
+///
+/// \note If the call fails for any reason (other than a bug in the API
+/// itself) then the \a io_image data is returned unmodified.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_append(void* io_image,
+ const int i_sectionId,
+ const void* i_data,
+ const uint32_t i_size,
+ const uint32_t i_allocation,
+ uint32_t* o_sectionOffset);
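+
+/// The restore-on-failure guarantee supports the speculative-append pattern
+/// sketched below, which packs records into a fixed allocation until no
+/// more fit; haveRecord(), nextRecord(), RECORD_SIZE, 'image' and
+/// 'allocation' are hypothetical:
+///
+/// \code
+/// while (haveRecord()) {
+///     rc = sbe_xip_append(image, SBE_XIP_SECTION_SLW,
+///                         nextRecord(), RECORD_SIZE, allocation, 0);
+///     if (rc == SBE_XIP_WOULD_OVERFLOW) break;  // image is still valid
+///     if (rc) return rc;                        // a real error
+/// }
+/// \endcode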
+
+
+/// Convert an SBE-XIP section offset to a relocatable PORE address
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory
+///
+/// \param[in] i_sectionId A valid SBE-XIP section identifier; The section
+/// must be non-empty.
+///
+/// \param[in] i_offset An offset (in bytes) within the section. At least one
+/// byte at \a i_offset must be currently allocated in the section.
+///
+/// \param[out] o_poreAddress The equivalent relocatable PORE address is
+/// returned via this pointer. Since valid PORE addresses are always either
+/// 4-byte (code) or 8-byte (data) aligned, this API checks the alignment of
+/// the translated address and returns SBE_XIP_ALIGNMENT_ERROR if the PORE
+/// address is not at least 4-byte aligned. Note that the translated address
+/// is still returned even if incorrectly aligned.
+///
+/// This API is typically used to translate section offsets returned from
+/// sbe_xip_append() into relocatable PORE addresses.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_section2pore(const void* i_image,
+ const int i_sectionId,
+ const uint32_t i_offset,
+ uint64_t* o_poreAddress);
+
+
+/// Convert an SBE-XIP relocatable PORE address to a host memory address
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory.
+///
+/// \param[in] i_poreAddress A relocatable PORE address putatively addressing
+/// relocatable memory contained in the image.
+///
+/// \param[out] o_hostAddress The API updates the location pointed to by \a
+/// o_hostAddress with the host address of the memory addressed by \a
+/// i_poreAddress. In the event of an error (non-0 return code) the final
+/// content of \a o_hostAddress is undefined.
+///
+/// This API is typically used to translate relocatable PORE addresses stored
+/// in the SBE-XIP image into the equivalent host address of the in-memory
+/// image, allowing host-code to manipulate arbitrary data structures in the
+/// image. If the \a i_poreAddress does not refer to memory within the image
+/// (as determined by the link address and image size) then the
+/// SBE_XIP_INVALID_ARGUMENT error code is returned.
+///
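+/// For example, a sketch (with hypothetical variable names) that clears a
+/// data structure stored in the image:
+///
+/// \code
+/// void* hostAddress;
+///
+/// if (sbe_xip_pore2host(image, poreAddress, &hostAddress) == 0) {
+///     /* hostAddress now points into the in-memory image */
+///     memset(hostAddress, 0, dataSize);
+/// }
+/// \endcode
+///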
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_pore2host(const void* i_image,
+ const uint64_t i_poreAddress,
+ void** o_hostAddress);
+
+
+/// Convert an SBE-XIP relocatable PORE address to section Id and offset
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory.
+///
+/// \param[in] i_poreAddress A relocatable PORE address putatively addressing
+/// relocatable memory contained in the image.
+///
+/// \param[out] o_section The API updates the location pointed to by \a
+/// o_section with the section Id of the memory addressed by \a
+/// i_poreAddress. In the event of an error (non-0 return code) the final
+/// content of \a o_section is undefined.
+///
+/// \param[out] o_offset The API updates the location pointed to by \a
+/// o_offset with the byte offset of the memory addressed by \a i_poreAddress
+/// within \a o_section. In the event of an error (non-0 return code) the
+/// final content of \a o_offset is undefined.
+///
+/// This API is typically used to translate relocatable PORE addresses stored
+/// in the SBE-XIP image into the equivalent section + offset form, allowing
+/// host-code to manipulate arbitrary data structures in the image. If the \a
+/// i_poreAddress does not refer to memory within the image (as determined by
+/// the link address and image size) then the SBE_XIP_INVALID_ARGUMENT error
+/// code is returned.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_pore2section(const void* i_image,
+ const uint64_t i_poreAddress,
+ int* o_section,
+ uint32_t* o_offset);
+
+
+/// Convert an in-memory SBE-XIP host address to a relocatable PORE address
+///
+/// \param[in] i_image A pointer to an SBE-XIP image in host memory
+///
+/// \param[in] i_hostAddress A host address addressing data within the image.
+///
+/// \param[out] o_poreAddress The API updates the location pointed to by \a
+/// o_poreAddress with the equivalent relocatable PORE address of the memory
+/// addressed by \a i_hostAddress. Since valid PORE addresses are always
+/// either 4-byte (code) or 8-byte (data) aligned, this API checks the
+/// alignment of the translated address and returns SBE_XIP_ALIGNMENT_ERROR
+/// if the PORE address is not at least 4-byte aligned. Note that the
+/// translated address is still returned even if incorrectly aligned.
+///
+/// This API is provided as a convenient way to convert host memory addresses
+/// for an in-memory SBE-XIP image into PORE addresses correctly relocated for
+/// the image, for example to update pointer variables in the image. If the
+/// \a i_hostAddress does not refer to memory within the image (as determined
+/// by the image address and image size) then the SBE_XIP_INVALID_ARGUMENT
+/// error code is returned.
+///
+/// \retval 0 Success
+///
+/// \retval non-0 See \ref sbe_xip_image_errors
+int
+sbe_xip_host2pore(const void* i_image,
+ void* i_hostAddress,
+ uint64_t* o_poreAddress);
+
+
+/// \defgroup sbe_xip_image_errors Error codes from SBE-XIP image APIs
+///
+/// @{
+
+/// A putative SBE-XIP image does not have the correct magic number, or
+/// contains some other major inconsistency.
+#define SBE_XIP_IMAGE_ERROR 1
+
+/// The TOC may be missing, partially present, or may have an alignment problem.
+#define SBE_XIP_TOC_ERROR 2
+
+/// A named item was not found in the SBE-XIP TOC, or a putative HALT address
+/// is not associated with a halt code in .halt.
+#define SBE_XIP_ITEM_NOT_FOUND 3
+
+/// A named item appears in the SBE-XIP TOC, but the data is not present in
+/// the image. This error can occur if sections have been deleted from the
+/// image.
+#define SBE_XIP_DATA_NOT_PRESENT 4
+
+/// A named item appears in the SBE-XIP TOC, but the data cannot be
+/// modified. This error will occur if an attempt is made to modify an
+/// address-only entry.
+#define SBE_XIP_CANT_MODIFY 5
+
+/// A direct or implied argument is invalid, e.g., an illegal data type or
+/// section identifier, or an address not contained within the image.
+#define SBE_XIP_INVALID_ARGUMENT 6
+
+/// A data type mismatch or an illegal type was specified or implied for an
+/// operation.
+#define SBE_XIP_TYPE_ERROR 7
+
+/// A bug in an SBE-XIP image API
+#define SBE_XIP_BUG 8
+
+/// The image must first be normalized with sbe_xip_normalize().
+#define SBE_XIP_NOT_NORMALIZED 9
+
+/// Attempt to delete a non-empty section that is not the final section of the
+/// image, or an attempt to append data to a non-empty section that is not the
+/// final section of the image, or an attempt to operate on an empty section
+/// for those APIs that prohibit this.
+#define SBE_XIP_SECTION_ERROR 10
+
+/// An address translation API returned a PORE address that was not at least
+/// 4-byte aligned, or alignment violations were observed by
+/// sbe_xip_validate() or sbe_xip_append().
+#define SBE_XIP_ALIGNMENT_ERROR 11
+
+/// An API that performs dynamic memory allocation was unable to allocate
+/// memory.
+#define SBE_XIP_NO_MEMORY 12
+
+/// Attempt to get or set a vector element with an index that is outside of
+/// the declared bounds of the vector.
+#define SBE_XIP_BOUNDS_ERROR 13
+
+/// Attempt to grow the image past its defined memory allocation
+#define SBE_XIP_WOULD_OVERFLOW 14
+
+/// An error associated with the disassembler occurred.
+#define SBE_XIP_DISASSEMBLER_ERROR 15
+
+/// A hash collision occurred while creating the .fixed_toc section.
+#define SBE_XIP_HASH_COLLISION 16
+
+/// Applications can expand this macro to declare an array of string forms of
+/// the error codes if desired.
+#define SBE_XIP_ERROR_STRINGS(var) \
+ const char* var[] = { \
+ "Success", \
+ "SBE_XIP_IMAGE_ERROR", \
+ "SBE_XIP_TOC_ERROR", \
+ "SBE_XIP_ITEM_NOT_FOUND", \
+ "SBE_XIP_DATA_NOT_PRESENT", \
+ "SBE_XIP_CANT_MODIFY", \
+ "SBE_XIP_INVALID_ARGUMENT", \
+ "SBE_XIP_TYPE_ERROR", \
+ "SBE_XIP_BUG", \
+ "SBE_XIP_NOT_NORMALIZED", \
+ "SBE_XIP_SECTION_ERROR", \
+ "SBE_XIP_ALIGNMENT_ERROR", \
+ "SBE_XIP_NO_MEMORY", \
+ "SBE_XIP_BOUNDS_ERROR", \
+ "SBE_XIP_WOULD_OVERFLOW", \
+ "SBE_XIP_DISASSEMBLER_ERROR", \
+ "SBE_XIP_HASH_COLLISION", \
+ }
+
+/// Applications can use this macro to safely index the array of error
+/// strings.
+#define SBE_XIP_ERROR_STRING(var, n) \
+ ((((n) < 0) || ((n) >= (int)(sizeof(var) / sizeof(char*)))) ? \
+ "Bug : Invalid SBE-XIP error code" : var[n])
+
+/// @}
+
+/// Disassembler error codes.
+#define DIS_IMAGE_ERROR 1
+#define DIS_MEMORY_ERROR 2
+#define DIS_DISASM_ERROR 3
+#define DIS_RING_NAME_ADDR_MATCH_SUCCESS 4
+#define DIS_RING_NAME_ADDR_MATCH_FAILURE 5
+#define DIS_TOO_MANY_DISASM_WARNINGS 6
+#define DIS_DISASM_TROUBLES 7
+
+#define DIS_ERROR_STRINGS(var) \
+ const char* var[] = { \
+ "Success", \
+ "DIS_IMAGE_ERROR", \
+ "DIS_MEMORY_ERROR", \
+ "DIS_DISASM_ERROR", \
+ "DIS_RING_NAME_ADDR_MATCH_SUCCESS", \
+ "DIS_RING_NAME_ADDR_MATCH_FAILURE", \
+ "DIS_TOO_MANY_DISASM_WARNINGS", \
+ "DIS_DISASM_TROUBLES", \
+ }
+
+#define DIS_ERROR_STRING(var, n) \
+ ((((n) < 0) || ((n) >= (int)(sizeof(var) / sizeof(char*)))) ? \
+ "Bug : Invalid DIS error code" : var[n])
+
+#if 0
+{ /* So __cplusplus doesn't mess w/auto-indent */
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#endif // __ASSEMBLER__
+
+
+////////////////////////////////////////////////////////////////////////////
+// Assembler Definitions
+////////////////////////////////////////////////////////////////////////////
+
+#ifdef __ASSEMBLER__
+
+/// Create an XIP TOC entry
+///
+/// \param[in] index The string form of the \a index symbol is created and
+/// linked from the TOC entry to allow external search procedures to locate
+/// the \a address.
+///
+/// \param[in] type One of the SBE_XIP_* type constants; See \ref
+/// sbe_xip_toc_types.
+///
+/// \param[in] address The address of the indexed code or data; This will
+/// typically be a symbol.
+///
+/// \param[in] elements <Optional> For vector types, number of elements in the
+/// vector, which is limited to an 8-bit unsigned integer. This parameter
+/// defaults to 1 which indicates a scalar type. Declaring a vector with 0
+/// elements disables bounds checking on vector accesses, and can be used if
+/// very large or indeterminately sized vectors are required. The TOC format
+/// does not support vectors of strings or addresses.
+///
+/// The \c .xip_toc macro creates an XIP Table of Contents (TOC) structure in
+/// the \c .toc section, as specified by the parameters. This macro is
+/// typically not used directly in assembly code. Instead programmers should
+/// use .xip_quad, .xip_quada, .xip_quadia, .xip_address, .xip_string or
+/// .xip_cvs_revision.
+
+ .macro .xip_toc, index:req, type:req, address:req, elements=1
+
+ .if (((\type) < 1) || ((\type) > SBE_XIP_MAX_TYPE_INDEX))
+ .error ".xip_toc : Illegal type index"
+ .endif
+
+ // First push into the .strings section to lay down the
+ // string form of the index name under a local label.
+
+ .pushsection .strings
+7667862:
+ .asciz "\index"
+ .popsection
+
+ // Now the 12-byte TOC entry is created. Push into the .toc section
+ // and lay down the first 4 bytes which are always a pointer to the
+ // string just declared. The next 4 bytes are the address of the data
+ // (or the address itself in the case of address types). The final 4
+ // bytes are the type, section (always 0 prior to normalization),
+ // number of elements, and a padding byte.
+
+ .pushsection .toc
+
+ .long 7667862b, (\address)
+ .byte (\type), 0, (\elements), 0
+
+ .popsection
+
+ .endm
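+
+// For example (hypothetical symbol name), the following lays down a TOC
+// entry indexing the data symbol my_var as a 64-bit scalar:
+//
+//	.xip_toc my_var, SBE_XIP_UINT64, my_var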
+
+
+/// Allocate and initialize 64-bit global scalar or vector data and create the
+/// TOC entry.
+///
+/// \param[in] symbol The name of the scalar or vector; this name is also used
+/// as the TOC index of the data.
+///
+/// \param[in] init The initial value of (each element of) the data.
+/// This is a 64-bit integer; To allocate address pointers use .xip_quada.
+///
+/// \param[in] elements The number of 64-bit elements in the data structure,
+/// defaulting to 1, with a maximum value of 255.
+///
+/// \param[in] section The section where the data will be allocated,
+/// default depends on the memory space
+
+ .macro .xip_quad, symbol:req, init:req, elements=1, section
+
+ ..xip_quad_helper .quad, \symbol, (\init), (\elements), \section
+
+ .endm
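+
+// For example (hypothetical symbol name), a zero-initialized 4-element
+// 64-bit vector in the default section:
+//
+//	.xip_quad my_counters, 0, 4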
+
+
+/// Allocate and initialize 64-bit global scalar or vector data containing a
+/// relocatable address, and create the TOC entry.
+///
+/// \param[in] symbol The name of the scalar or vector; this name is also used
+/// as the TOC index of the data.
+///
+/// \param[in] offset The initial value of (each element of) the data. This
+/// will typically be a symbolic address. If the intention is to define an
+/// address that will always be filled in later by image manipulation tools,
+/// then use the .xip_quad macro with a 0 initial value.
+///
+/// \param[in] elements The number of 64-bit elements in the data structure,
+/// defaulting to 1, with a maximum value of 255.
+///
+/// \param[in] section The section where the data will be allocated,
+/// default depends on the memory space
+
+ .macro .xip_quada, symbol:req, offset:req, elements=1, section
+
+ ..xip_quad_helper .quada, \symbol, (\offset), (\elements), \section
+
+ .endm
+
+
+/// Helper for .xip_quad and .xip_quada
+
+ .macro ..xip_quad_helper, directive, symbol, init, elements, section
+
+ .if (((\elements) < 1) || ((\elements) > 255))
+ .error "The number of vector elements must be in the range 1..255"
+ .endif
+
+ ..xip_pushsection \section
+ .balign 8
+
+ .global \symbol
+\symbol\():
+ .rept (\elements)
+ \directive (\init)
+ .endr
+
+ .popsection
+
+ .xip_toc \symbol, SBE_XIP_UINT64, \symbol, (\elements)
+
+ .endm
+
+
+/// Allocate and initialize 64-bit global scalar or vector data containing
+/// full 64-bit addresses and create a TOC entry
+///
+/// \param[in] symbol The name of the scalar or vector; this name is also used
+/// as the TOC index of the data.
+///
+/// \param[in] space A valid PORE memory space descriptor
+///
+/// \param[in] offset A 32-bit relocatable offset
+///
+/// \param[in] elements The number of 64-bit elements in the data structure,
+/// defaulting to 1, with a maximum value of 255.
+///
+/// \param[in] section The section where the data will be allocated,
+/// default depends on the memory space
+
+ .macro .xip_quadia, symbol:req, space:req, offset:req, \
+ elements=1, section
+
+ .if (((\elements) < 1) || ((\elements) > 255))
+ .error "The number of vector elements must be in the range 1..255"
+ .endif
+
+ ..xip_pushsection \section
+ .balign 8
+
+ .global \symbol
+\symbol\():
+ .rept (\elements)
+ .quadia (\space), (\offset)
+ .endr
+
+ .popsection
+
+ .xip_toc \symbol, SBE_XIP_UINT64, \symbol, (\elements)
+
+ .endm
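+
+// For example (hypothetical names), a single pointer initialized with the
+// relocated address of a code label:
+//
+//	.xip_quadia my_entry_ptr, PORE_SPACE_OCI, my_entry_point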
+
+/// Default push into .ipl_data unless in an OCI space, then .data
+
+ .macro ..xip_pushsection, section
+
+ .ifnb \section
+ .pushsection \section
+ .else
+ .if (_PGAS_DEFAULT_SPACE == PORE_SPACE_OCI)
+ .pushsection .data
+ .else
+ .pushsection .ipl_data
+ .endif
+ .endif
+
+ .balign 8
+
+ .endm
+
+/// Allocate and initialize a string in .strings
+///
+/// \param[in] index The string will be stored in the TOC using this index
+/// symbol.
+///
+/// \param[in] string The string to be allocated in .strings. String space is
+/// fixed once allocated. Strings designed to be overwritten by external tools
+/// should be allocated to be as long as eventually needed (e.g., by a string
+/// of blanks).
+
+ .macro .xip_string, index:req, string:req
+
+ .pushsection .strings
+7874647:
+ .asciz "\string"
+ .popsection
+
+ .xip_toc \index, SBE_XIP_STRING, 7874647b
+
+ .endm
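+
+// For example (hypothetical index name and value):
+//
+//	.xip_string image_builder, "a-build-host"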
+
+
+/// Allocate and initialize a CVS Revision string in .strings
+///
+/// \param[in] index The string will be stored in the TOC using this index
+/// symbol.
+///
+/// \param[in] string A CVS revision string to be allocated in .strings. CVS
+/// revision strings are formatted by stripping out and only storing the
+/// actual revision number:
+///
+/// \code
+/// "$Revision <n>.<m> $" -> "<n>.<m>"
+/// \endcode
+
+
+ .macro .xip_cvs_revision, index:req, string:req
+
+ .pushsection .strings
+7874647:
+ ..cvs_revision_string "\string"
+ .popsection
+
+ .xip_toc \index, SBE_XIP_STRING, 7874647b
+
+ .endm
+
+
+/// Shorthand to create a TOC entry for an address
+///
+/// \param[in] index The symbol will be indexed as this name
+///
+/// \param[in] symbol <Optional> The symbol to index; by default the same as
+/// the index.
+
+ .macro .xip_address, index:req, symbol
+
+ .ifb \symbol
+ .xip_toc \index, SBE_XIP_ADDRESS, \index
+ .else
+ .xip_toc \index, SBE_XIP_ADDRESS, \symbol
+ .endif
+
+ .endm
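+
+// For example (hypothetical symbols), indexing the symbol __start under
+// the name entry_point:
+//
+//	.xip_address entry_point, __start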
+
+
+/// Edit and allocate a CVS revision string
+///
+/// CVS revision strings are formatted by stripping out and only storing the
+/// actual revision number:
+/// \code
+/// "$Revision <n>.<m> $" -> "<n>.<m>"
+/// \endcode
+
+ .macro ..cvs_revision_string, rev:req
+ .irpc c, \rev
+ .ifnc "\c", "$"
+ .ifnc "\c", "R"
+ .ifnc "\c", "e"
+ .ifnc "\c", "v"
+ .ifnc "\c", "i"
+ .ifnc "\c", "s"
+ .ifnc "\c", "i"
+ .ifnc "\c", "o"
+ .ifnc "\c", "n"
+ .ifnc "\c", ":"
+ .ifnc "\c", " "
+ .ascii "\c"
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endr
+ .byte 0
+ .endm
+
+#endif // __ASSEMBLER__
+
+#endif // __SBE_XIP_TOC_H
diff --git a/make_offsets.sh b/make_offsets.sh
new file mode 100755
index 0000000..c4eb1c5
--- /dev/null
+++ b/make_offsets.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+cat <<EOF
+#ifndef ASM_OFFSETS_H
+#define ASM_OFFSETS_H
+/* Derived from $1 by make_offsets.sh */
+
+`grep '^#define' "$1"`
+#endif /* ASM_OFFSETS_H */
+EOF
diff --git a/platforms/Makefile.inc b/platforms/Makefile.inc
new file mode 100644
index 0000000..5061b73
--- /dev/null
+++ b/platforms/Makefile.inc
@@ -0,0 +1,10 @@
+PLATDIR = platforms
+
+SUBDIRS += $(PLATDIR)
+PLATFORMS = $(PLATDIR)/built-in.o
+
+include $(SRC)/$(PLATDIR)/ibm-fsp/Makefile.inc
+include $(SRC)/$(PLATDIR)/rhesus/Makefile.inc
+include $(SRC)/$(PLATDIR)/bmc/Makefile.inc
+
+$(PLATFORMS): $(IBM_FSP) $(RHESUS) $(BMC)
diff --git a/platforms/bmc/Makefile.inc b/platforms/bmc/Makefile.inc
new file mode 100644
index 0000000..348e594
--- /dev/null
+++ b/platforms/bmc/Makefile.inc
@@ -0,0 +1,6 @@
+SUBDIRS += $(PLATDIR)/bmc
+
+BMC_OBJS = palmetto.o pnor.o
+BMC = $(PLATDIR)/bmc/built-in.o
+$(BMC): $(BMC_OBJS:%=$(PLATDIR)/bmc/%)
+
diff --git a/platforms/bmc/bmc.h b/platforms/bmc/bmc.h
new file mode 100644
index 0000000..59d20bb
--- /dev/null
+++ b/platforms/bmc/bmc.h
@@ -0,0 +1,24 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __BMC_H
+#define __BMC_H
+
+extern void bmc_ext_irq(uint32_t chip_id);
+extern int pnor_init(void);
+
+#endif /* __BMC_H */
diff --git a/platforms/bmc/palmetto.c b/platforms/bmc/palmetto.c
new file mode 100644
index 0000000..42905de
--- /dev/null
+++ b/platforms/bmc/palmetto.c
@@ -0,0 +1,186 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <console.h>
+#include <psi.h>
+#include <chip.h>
+#include <xscom.h>
+#include <ast.h>
+
+#include "bmc.h"
+
+/* UART1 config */
+#define UART_IO_BASE 0x3f8
+#define UART_IO_COUNT 8
+
+#define UART_LPC_IRQ 4
+
+static void palmetto_ext_irq(unsigned int chip_id __unused)
+{
+ uart_irq();
+}
+
+static void palmetto_init(void)
+{
+ /* Setup dummy console nodes */
+ if (dummy_console_enabled())
+ dummy_console_add_nodes();
+
+ /* Initialize AHB accesses via AST2400 */
+ ast_io_init();
+
+ /* Initialize PNOR/NVRAM */
+ pnor_init();
+}
+
+
+static void palmetto_fixup_dt_uart(struct dt_node *lpc)
+{
+ /*
+ * The official OF ISA/LPC binding is a bit odd, it prefixes
+ * the unit address for IO with "i". It uses 2 cells, the first
+ * one indicating IO vs. Memory space (along with bits to
+ * represent aliasing).
+ *
+	 * We pick up that binding and add to it "2" as an indication
+	 * of FW space.
+ */
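+	/*
+	 * A sketch of the node this fixup creates (values follow the
+	 * UART_IO_BASE/UART_IO_COUNT constants above):
+	 *
+	 *	serial@i3f8 {
+	 *		reg = <1 0x3f8 8>;
+	 *		compatible = "ns16550", "pnpPNP,501";
+	 *		clock-frequency = <1843200>;
+	 *		current-speed = <115200>;
+	 *	};
+	 */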
+ struct dt_node *uart;
+ char namebuf[32];
+
+ /* First check if the UART is already there */
+ dt_for_each_child(lpc, uart) {
+ if (dt_node_is_compatible(uart, "ns16550"))
+ return;
+ }
+
+ /* Otherwise, add a node for it */
+ sprintf(namebuf, "serial@i%x", UART_IO_BASE);
+ uart = dt_new(lpc, namebuf);
+
+ dt_add_property_cells(uart, "reg",
+ 1, /* IO space */
+ UART_IO_BASE, UART_IO_COUNT);
+ dt_add_property_strings(uart, "compatible",
+ "ns16550",
+ "pnpPNP,501");
+ dt_add_property_cells(uart, "clock-frequency", 1843200);
+ dt_add_property_cells(uart, "current-speed", 115200);
+
+ /*
+	 * This is needed by Linux for some obscure reasons;
+	 * we'll eventually need to sanitize it, but in the meantime
+	 * let's make sure it's there.
+ */
+ dt_add_property_strings(uart, "device_type", "serial");
+
+ /*
+ * Add interrupt. This simulates coming from HostBoot which
+ * does not know our interrupt numbering scheme. Instead, it
+ * just tells us which chip the interrupt is wired to, it will
+ * be the PSI "host error" interrupt of that chip. For now we
+ * assume the same chip as the LPC bus is on.
+ */
+ dt_add_property_cells(uart, "ibm,irq-chip-id", dt_get_chip_id(lpc));
+}
+
+static void palmetto_fixup_dt(void)
+{
+ struct dt_node *n, *primary_lpc = NULL;
+
+ /* Find the primary LPC bus */
+ dt_for_each_compatible(dt_root, n, "ibm,power8-lpc") {
+ if (!primary_lpc || dt_has_node_property(n, "primary", NULL))
+ primary_lpc = n;
+ if (dt_has_node_property(n, "#address-cells", NULL))
+ break;
+ }
+
+ if (!primary_lpc)
+ return;
+
+ /* Fixup the UART, that might be missing from HB */
+ palmetto_fixup_dt_uart(primary_lpc);
+
+ /* Force the dummy console for now */
+ force_dummy_console();
+}
+
+static void palmetto_fixup_psi_bar(void)
+{
+ struct proc_chip *chip = next_chip(NULL);
+ uint64_t psibar;
+
+ /* Read PSI BAR */
+ if (xscom_read(chip->id, 0x201090A, &psibar)) {
+ prerror("PLAT: Error reading PSI BAR\n");
+ return;
+ }
+ /* Already configured, bail out */
+ if (psibar & 1)
+ return;
+
+ /* Hard wire ... yuck */
+ psibar = 0x3fffe80000001;
+
+ printf("PLAT: Fixing up PSI BAR on chip %d BAR=%llx\n",
+ chip->id, psibar);
+
+ /* Now write it */
+ xscom_write(chip->id, 0x201090A, psibar);
+}
+
+static bool palmetto_probe(void)
+{
+ const char *model;
+
+ if (!dt_node_is_compatible(dt_root, "ibm,powernv"))
+ return false;
+
+ /* Temporary ... eventually we'll get that in compatible */
+ model = dt_prop_get_def(dt_root, "model", NULL);
+ if ((!model || !strstr(model, "palmetto")) &&
+ (!dt_node_is_compatible(dt_root, "ibm,palmetto")))
+ return false;
+
+ /* Hostboot's device-tree isn't quite right yet */
+ palmetto_fixup_dt();
+
+ /* Hostboot forgets to populate the PSI BAR */
+ palmetto_fixup_psi_bar();
+
+ /* Send external interrupts to me */
+ psi_set_external_irq_policy(EXTERNAL_IRQ_POLICY_SKIBOOT);
+
+ /* Configure UART1 on SuperIO */
+ ast_setup_uart1(UART_IO_BASE, UART_LPC_IRQ);
+
+ /* Setup UART and use it as console with interrupts */
+ uart_init(true);
+
+ return true;
+}
+
+DECLARE_PLATFORM(palmetto) = {
+ .name = "Palmetto",
+ .probe = palmetto_probe,
+ .init = palmetto_init,
+ .external_irq = palmetto_ext_irq,
+};
+
diff --git a/platforms/bmc/pnor.c b/platforms/bmc/pnor.c
new file mode 100644
index 0000000..878615d
--- /dev/null
+++ b/platforms/bmc/pnor.c
@@ -0,0 +1,87 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <console.h>
+#include <opal.h>
+#include <libflash/libflash.h>
+#include <libflash/libffs.h>
+
+#include "bmc.h"
+#include "ast.h"
+
+static struct spi_flash_ctrl *pnor_ctrl;
+static struct flash_chip *pnor_chip;
+static struct ffs_handle *pnor_ffs;
+
+int pnor_init(void)
+{
+ uint32_t nv_part, nv_start, nv_size;
+ int rc;
+
+ /* Open controller, flash and ffs */
+ rc = ast_sf_open(AST_SF_TYPE_PNOR, &pnor_ctrl);
+ if (rc) {
+ prerror("PLAT: Failed to open PNOR flash controller\n");
+ goto fail;
+ }
+ rc = flash_init(pnor_ctrl, &pnor_chip);
+ if (rc) {
+		prerror("PLAT: Failed to initialize PNOR driver\n");
+ goto fail;
+ }
+ rc = ffs_open_flash(pnor_chip, 0, 0, &pnor_ffs);
+ if (rc) {
+ prerror("PLAT: Failed to parse FFS partition map\n");
+ goto fail;
+ }
+
+ /*
+ * Grab NVRAM and initialize the flash_nvram module
+ *
+ * Note: Ignore actual size for now ... some images have
+ * it setup incorrectly.
+ */
+ rc = ffs_lookup_part(pnor_ffs, "NVRAM", &nv_part);
+ if (rc) {
+ prerror("PLAT: No NVRAM partition in PNOR\n");
+ return OPAL_HARDWARE;
+ }
+ rc = ffs_part_info(pnor_ffs, nv_part, NULL,
+ &nv_start, &nv_size, NULL);
+ if (rc) {
+ prerror("PLAT: Failed to get NVRAM partition info\n");
+ return OPAL_HARDWARE;
+ }
+ flash_nvram_init(pnor_chip, nv_start, nv_size);
+
+ return 0;
+ fail:
+ if (pnor_ffs)
+ ffs_close(pnor_ffs);
+ pnor_ffs = NULL;
+ if (pnor_chip)
+ flash_exit(pnor_chip);
+ pnor_chip = NULL;
+ if (pnor_ctrl)
+ ast_sf_close(pnor_ctrl);
+ pnor_ctrl = NULL;
+
+ return rc;
+}
+
diff --git a/platforms/ibm-fsp/Makefile.inc b/platforms/ibm-fsp/Makefile.inc
new file mode 100644
index 0000000..a885cbb
--- /dev/null
+++ b/platforms/ibm-fsp/Makefile.inc
@@ -0,0 +1,6 @@
+SUBDIRS += $(PLATDIR)/ibm-fsp
+
+IBM_FSP_OBJS = common.o lxvpd.o apollo.o firenze.o
+IBM_FSP = $(PLATDIR)/ibm-fsp/built-in.o
+$(IBM_FSP): $(IBM_FSP_OBJS:%=$(PLATDIR)/ibm-fsp/%)
+
diff --git a/platforms/ibm-fsp/apollo.c b/platforms/ibm-fsp/apollo.c
new file mode 100644
index 0000000..0d8fb4b
--- /dev/null
+++ b/platforms/ibm-fsp/apollo.c
@@ -0,0 +1,62 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <fsp.h>
+#include <pci.h>
+
+#include "ibm-fsp.h"
+#include "lxvpd.h"
+
+static bool apollo_probe(void)
+{
+ return dt_node_is_compatible(dt_root, "ibm,apollo");
+}
+
+static void apollo_setup_phb(struct phb *phb, unsigned int index)
+{
+ struct dt_node *ioc_node;
+
+ /* Grab the device-tree node of the IOC */
+ ioc_node = phb->dt_node->parent;
+ if (!ioc_node)
+ return;
+
+ /*
+ * Process the pcie slot entries from the lx vpd lid
+ *
+	 * FIXME: We currently assume chip 1 always; this will have to be
+	 * fixed once we understand the right way to get the BRxy "x"
+	 * value. (This actually seems to work...)
+ */
+ lxvpd_process_slot_entries(phb, ioc_node, 1, index);
+}
+
+DECLARE_PLATFORM(apollo) = {
+ .name = "Apollo",
+ .probe = apollo_probe,
+ .init = ibm_fsp_init,
+ .cec_power_down = ibm_fsp_cec_power_down,
+ .cec_reboot = ibm_fsp_cec_reboot,
+ .pci_setup_phb = apollo_setup_phb,
+ .pci_get_slot_info = lxvpd_get_slot_info,
+ .nvram_info = fsp_nvram_info,
+ .nvram_start_read = fsp_nvram_start_read,
+ .nvram_write = fsp_nvram_write,
+};
+
diff --git a/platforms/ibm-fsp/common.c b/platforms/ibm-fsp/common.c
new file mode 100644
index 0000000..6d5ee17
--- /dev/null
+++ b/platforms/ibm-fsp/common.c
@@ -0,0 +1,196 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <fsp.h>
+#include <fsp-sysparam.h>
+#include <opal.h>
+#include <console.h>
+
+#include "ibm-fsp.h"
+
+static void map_debug_areas(void)
+{
+ uint64_t t, i;
+
+ /* Our memcons is in a section of its own and already
+ * aligned to 4K. The buffers are mapped as a whole
+ */
+ fsp_tce_map(PSI_DMA_MEMCONS, &memcons, 0x1000);
+ fsp_tce_map(PSI_DMA_LOG_BUF, (void*)INMEM_CON_START, INMEM_CON_LEN);
+
+ debug_descriptor.memcons_tce = PSI_DMA_MEMCONS;
+ t = memcons.obuf_phys - INMEM_CON_START + PSI_DMA_LOG_BUF;
+ debug_descriptor.memcons_obuf_tce = t;
+ t = memcons.ibuf_phys - INMEM_CON_START + PSI_DMA_LOG_BUF;
+ debug_descriptor.memcons_ibuf_tce = t;
+
+ /* We only have space in the TCE table for the trace
+ * areas on P8
+ */
+ if (proc_gen != proc_gen_p8)
+ return;
+
+ t = PSI_DMA_TRACE_BASE;
+ for (i = 0; i < debug_descriptor.num_traces; i++) {
+ /*
+ * Trace buffers are misaligned by 0x10 due to the lock
+ * in the trace structure, and their size is also not
+		 * completely aligned. (They are allocated so that, with
+		 * the lock included, they do cover an entire multiple of
+		 * a 4K page, however.)
+ *
+ * This means we have to map the lock into the TCEs and
+ * align everything. Not a huge deal but needs to be
+ * taken into account.
+ *
+ * Note: Maybe we should map them read-only...
+ */
+ uint64_t tstart, tend, toff, tsize;
+
+ tstart = ALIGN_DOWN(debug_descriptor.trace_phys[i], 0x1000);
+ tend = ALIGN_UP(debug_descriptor.trace_phys[i] +
+ debug_descriptor.trace_size[i], 0x1000);
+ toff = debug_descriptor.trace_phys[i] - tstart;
+ tsize = tend - tstart;
+
+ fsp_tce_map(t, (void *)tstart, tsize);
+ debug_descriptor.trace_tce[i] = t + toff;
+ t += tsize;
+ }
+}
+
+
+void ibm_fsp_init(void)
+{
+ /* Early initializations of the FSP interface */
+ fsp_init();
+ map_debug_areas();
+ fsp_sysparam_init();
+
+ /* Get ready to receive E0 class messages. We need to respond
+ * to some of these for the init sequence to make forward progress
+ */
+ fsp_console_preinit();
+
+ /* Get ready to receive OCC related messages */
+ occ_fsp_init();
+
+	/* Get ready to receive Memory [Un]correctable Error messages. */
+ fsp_memory_err_init();
+
+ /* Initialize elog access */
+ fsp_elog_read_init();
+ fsp_elog_write_init();
+
+ /* Initiate dump service */
+ fsp_dump_init();
+
+ /* Start FSP/HV state controller & perform OPL */
+ fsp_opl();
+
+ /* Send MDST table notification to FSP */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0000);
+ fsp_mdst_table_init();
+
+ /* Initialize the panel */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0001);
+ fsp_oppanel_init();
+
+ /* Start the surveillance process */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0002);
+ fsp_init_surveillance();
+
+ /* Initialize sensor access */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0003);
+ fsp_init_sensor();
+
+ /* LED */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0004);
+ fsp_led_init();
+
+ /* Monitor for DIAG events */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0005);
+ fsp_init_diag();
+
+ /* Finish initializing the console */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0006);
+ fsp_console_init();
+
+ /* Read our initial RTC value */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0008);
+ fsp_rtc_init();
+
+ /* Initialize code update access */
+ op_display(OP_LOG, OP_MOD_INIT, 0x0009);
+ fsp_code_update_init();
+
+ /* Setup console */
+ if (fsp_present())
+ fsp_console_add_nodes();
+ else if (dummy_console_enabled())
+ dummy_console_add_nodes();
+}
+
+int64_t ibm_fsp_cec_reboot(void)
+{
+ uint32_t cmd = FSP_CMD_REBOOT;
+
+ if (!fsp_present())
+ return OPAL_UNSUPPORTED;
+
+ /* Flash new firmware */
+ if (fsp_flash_term_hook &&
+ fsp_flash_term_hook() == OPAL_SUCCESS)
+ cmd = FSP_CMD_DEEP_REBOOT;
+
+ printf("FSP: Sending 0x%02x reboot command to FSP...\n", cmd);
+
+	/* Send the reboot command to the FSP */
+ if (fsp_sync_msg(fsp_mkmsg(cmd, 0), true))
+ return OPAL_INTERNAL_ERROR;
+
+ return OPAL_SUCCESS;
+}
+
+int64_t ibm_fsp_cec_power_down(uint64_t request)
+{
+ /* Request is:
+ *
+ * 0 = normal
+ * 1 = immediate
+ * (we do not allow 2 for "pci cfg reset" just yet)
+ */
+
+	if (request != 0 && request != 1)
+ return OPAL_PARAMETER;
+
+ if (!fsp_present())
+ return OPAL_UNSUPPORTED;
+
+ /* Flash new firmware */
+ if (fsp_flash_term_hook)
+ fsp_flash_term_hook();
+
+ printf("FSP: Sending shutdown command to FSP...\n");
+
+ if (fsp_sync_msg(fsp_mkmsg(FSP_CMD_POWERDOWN_NORM, 1, request), true))
+ return OPAL_INTERNAL_ERROR;
+
+ return OPAL_SUCCESS;
+}
+
diff --git a/platforms/ibm-fsp/firenze.c b/platforms/ibm-fsp/firenze.c
new file mode 100644
index 0000000..ae72e21
--- /dev/null
+++ b/platforms/ibm-fsp/firenze.c
@@ -0,0 +1,247 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <fsp.h>
+#include <pci.h>
+#include <pci-cfg.h>
+#include <chip.h>
+
+#include "ibm-fsp.h"
+#include "lxvpd.h"
+
+/* Enable this to dump the PCIe inventory table before sending it to the FSP */
+#define DEBUG_INVENTORY
+
+/* Structure used to send PCIe card info to FSP */
+struct fsp_pcie_entry {
+ uint32_t hw_proc_id;
+ uint16_t slot_idx;
+ uint16_t reserved;
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint16_t subsys_vendor_id;
+ uint16_t subsys_device_id;
+};
+
+struct fsp_pcie_inventory {
+ uint32_t version; /* currently 1 */
+ uint32_t num_entries;
+ uint32_t entry_size;
+ uint32_t entry_offset;
+ struct fsp_pcie_entry entries[];
+};
+
+static struct fsp_pcie_inventory *fsp_pcie_inv;
+static unsigned int fsp_pcie_inv_alloc_count;
+#define FSP_PCIE_INV_ALLOC_CHUNK 4
+
+static bool firenze_probe(void)
+{
+ return dt_node_is_compatible(dt_root, "ibm,firenze");
+}
+
+static void firenze_send_pci_inventory(void)
+{
+ uint64_t base, abase, end, aend, offset;
+ int64_t rc;
+
+ if (!fsp_pcie_inv)
+ return;
+
+ printf("PLAT: Sending PCI inventory to FSP, table has %d entries\n",
+ fsp_pcie_inv->num_entries);
+
+#ifdef DEBUG_INVENTORY
+ {
+ unsigned int i;
+
+ printf("HWP SLT VDID DVID SVID SDID\n");
+ printf("---------------------------\n");
+ for (i = 0; i < fsp_pcie_inv->num_entries; i++) {
+ struct fsp_pcie_entry *e = &fsp_pcie_inv->entries[i];
+
+ printf("%03d %03d %04x %04x %04x %04x\n",
+ e->hw_proc_id, e->slot_idx,
+ e->vendor_id, e->device_id,
+ e->subsys_vendor_id, e->subsys_device_id);
+ }
+ }
+#endif
+
+ /*
+ * Get the location of the table in a form we can send
+ * to the FSP
+ */
+ base = (uint64_t)fsp_pcie_inv;
+ end = base + sizeof(struct fsp_pcie_inventory) +
+ fsp_pcie_inv->num_entries * fsp_pcie_inv->entry_size;
+ abase = base & ~0xffful;
+ aend = (end + 0xffful) & ~0xffful;
+ offset = PSI_DMA_PCIE_INVENTORY + (base & 0xfff);
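+	/* e.g. a table at host address ...1234 is mapped from the 4K-aligned
+	 * ...1000, so the FSP is given PSI_DMA_PCIE_INVENTORY + 0x234 */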
+
+	/* We can only accommodate so many entries in the PSI map */
+ if ((aend - abase) > PSI_DMA_PCIE_INVENTORY_SIZE) {
+ prerror("PLAT: PCIe inventory too large (%lld bytes)\n",
+ aend - abase);
+ goto bail;
+ }
+
+ /* Map this in the TCEs */
+ fsp_tce_map(PSI_DMA_PCIE_INVENTORY, (void *)abase, aend - abase);
+
+ /* Send FSP message */
+ rc = fsp_sync_msg(fsp_mkmsg(FSP_CMD_PCI_POWER_CONF, 3,
+ hi32(offset), lo32(offset),
+ end - base), true);
+ if (rc)
+ prerror("PLAT: FSP error %lld sending inventory\n", rc);
+
+ /* Unmap */
+ fsp_tce_unmap(PSI_DMA_PCIE_INVENTORY, aend - abase);
+ bail:
+ /*
+ * We free the inventory. We'll have to redo that on hotplug
+	 * when we support it, but that isn't the case yet.
+ */
+ free(fsp_pcie_inv);
+ fsp_pcie_inv = NULL;
+}
+
+static void firenze_add_pcidev_to_fsp_inventory(struct phb *phb,
+ struct pci_device *pd)
+{
+ struct fsp_pcie_entry *entry;
+ struct proc_chip *chip;
+
+ /* Check if we need to do some (Re)allocation */
+ if (!fsp_pcie_inv ||
+ fsp_pcie_inv->num_entries == fsp_pcie_inv_alloc_count) {
+ unsigned int new_count;
+ size_t new_size;
+ bool need_init = !fsp_pcie_inv;
+
+ /* (Re)allocate the block to the new size */
+ new_count = fsp_pcie_inv_alloc_count + FSP_PCIE_INV_ALLOC_CHUNK;
+ new_size = sizeof(struct fsp_pcie_inventory);
+ new_size += sizeof(struct fsp_pcie_entry) * new_count;
+ fsp_pcie_inv = realloc(fsp_pcie_inv, new_size);
+ fsp_pcie_inv_alloc_count = new_count;
+
+ /* Initialize the header for a new inventory */
+ if (need_init) {
+ fsp_pcie_inv->version = 1;
+ fsp_pcie_inv->num_entries = 0;
+ fsp_pcie_inv->entry_size =
+ sizeof(struct fsp_pcie_entry);
+ fsp_pcie_inv->entry_offset =
+ offsetof(struct fsp_pcie_inventory, entries);
+ }
+ }
+
+	/* Resolve the chip before claiming a slot in the table, so that a
+	 * failed lookup doesn't leave a garbage entry counted.
+	 */
+	chip = get_chip(dt_get_chip_id(phb->dt_node));
+	if (!chip) {
+		prerror("PLAT: Failed to get chip for PHB!\n");
+		return;
+	}
+
+	/* Add entry */
+	entry = &fsp_pcie_inv->entries[fsp_pcie_inv->num_entries++];
+	entry->hw_proc_id = chip->pcid;
+ entry->slot_idx = pd->parent->slot_info->slot_index;
+ entry->reserved = 0;
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_VENDOR_ID, &entry->vendor_id);
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_DEVICE_ID, &entry->device_id);
+ if (pd->is_bridge) {
+ int64_t ssvc = pci_find_cap(phb, pd->bdfn,
+ PCI_CFG_CAP_ID_SUBSYS_VID);
+ if (ssvc < 0) {
+ entry->subsys_vendor_id = 0xffff;
+ entry->subsys_device_id = 0xffff;
+ } else {
+ pci_cfg_read16(phb, pd->bdfn,
+ ssvc + PCICAP_SUBSYS_VID_VENDOR,
+ &entry->subsys_vendor_id);
+ pci_cfg_read16(phb, pd->bdfn,
+ ssvc + PCICAP_SUBSYS_VID_DEVICE,
+ &entry->subsys_device_id);
+ }
+ } else {
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_SUBSYS_VENDOR_ID,
+ &entry->subsys_vendor_id);
+ pci_cfg_read16(phb, pd->bdfn, PCI_CFG_SUBSYS_ID,
+ &entry->subsys_device_id);
+ }
+}
+
+static void firenze_get_slot_info(struct phb *phb, struct pci_device *pd)
+{
+ /* Call the main LXVPD function first */
+ lxvpd_get_slot_info(phb, pd);
+
+ /*
+	 * Do we need to add that to the FSP inventory for power management?
+ *
+ * For now, we only add devices that:
+ *
+ * - Are function 0
+ * - Are not an RC or a downstream bridge
+ * - Have a direct parent that has a slot entry
+ * - Slot entry says pluggable
+ * - Aren't an upstream switch that has slot info
+ */
+ if (!pd || !pd->parent)
+ return;
+ if (pd->bdfn & 7)
+ return;
+ if (pd->dev_type == PCIE_TYPE_ROOT_PORT ||
+ pd->dev_type == PCIE_TYPE_SWITCH_DNPORT)
+ return;
+ if (pd->dev_type == PCIE_TYPE_SWITCH_UPPORT &&
+ pd->slot_info)
+ return;
+ if (!pd->parent->slot_info)
+ return;
+ if (!pd->parent->slot_info->pluggable)
+ return;
+ firenze_add_pcidev_to_fsp_inventory(phb, pd);
+}
+
+static void firenze_setup_phb(struct phb *phb, unsigned int index)
+{
+ uint32_t hub_id;
+
+ /* Grab Hub ID used to parse VPDs */
+ hub_id = dt_prop_get_u32_def(phb->dt_node, "ibm,hub-id", 0);
+
+ /* Process the pcie slot entries from the lx vpd lid */
+ lxvpd_process_slot_entries(phb, dt_root, hub_id, index);
+}
+
+DECLARE_PLATFORM(firenze) = {
+ .name = "Firenze",
+ .probe = firenze_probe,
+ .init = ibm_fsp_init,
+ .cec_power_down = ibm_fsp_cec_power_down,
+ .cec_reboot = ibm_fsp_cec_reboot,
+ .pci_setup_phb = firenze_setup_phb,
+ .pci_get_slot_info = firenze_get_slot_info,
+ .pci_probe_complete = firenze_send_pci_inventory,
+ .nvram_info = fsp_nvram_info,
+ .nvram_start_read = fsp_nvram_start_read,
+ .nvram_write = fsp_nvram_write,
+} ;
diff --git a/platforms/ibm-fsp/ibm-fsp.h b/platforms/ibm-fsp/ibm-fsp.h
new file mode 100644
index 0000000..f446d18
--- /dev/null
+++ b/platforms/ibm-fsp/ibm-fsp.h
@@ -0,0 +1,26 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __IBM_FSP_COMMON_H
+#define __IBM_FSP_COMMON_H
+
+extern void ibm_fsp_init(void);
+
+extern int64_t ibm_fsp_cec_power_down(uint64_t request);
+extern int64_t ibm_fsp_cec_reboot(void);
+
+#endif /* __IBM_FSP_COMMON_H */
diff --git a/platforms/ibm-fsp/lxvpd.c b/platforms/ibm-fsp/lxvpd.c
new file mode 100644
index 0000000..bcf8118
--- /dev/null
+++ b/platforms/ibm-fsp/lxvpd.c
@@ -0,0 +1,298 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * LXVPD support
+ *
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <vpd.h>
+#include <pci.h>
+#include <pci-cfg.h>
+
+#include "lxvpd.h"
+
+#define DBG(fmt...) do { } while(0)
+//#define DBG(fmt...) printf(fmt)
+
+/*
+ * XXX TODO: Add 1006 maps to add function loc codes and loc code maps
+ * (ie. -Tn part of the location code)
+ */
+struct lxvpd_slot_info_data {
+ uint8_t num_slots;
+ struct pci_slot_info info[];
+};
+
+static bool lxvpd_supported_slot(struct phb *phb, struct pci_device *pd)
+{
+	/* For PCI/PCI-X we only support the top level PHB with NULL "pd" */
+ if (phb->phb_type < phb_type_pcie_v1)
+ return pd == NULL;
+
+ /* Now we have PCI Express, we should never have a NULL "pd" */
+ if (!pd)
+ return false;
+
+ /* We support the root complex at the top level */
+ if (pd->dev_type == PCIE_TYPE_ROOT_PORT && !pd->parent)
+ return true;
+
+ /* We support an upstream switch port below the root complex */
+ if (pd->dev_type == PCIE_TYPE_SWITCH_UPPORT &&
+ pd->parent && pd->parent->dev_type == PCIE_TYPE_ROOT_PORT &&
+ !pd->parent->parent)
+ return true;
+
+ /* We support a downstream switch port below an upstream port
+ * below the root complex
+ */
+ if (pd->dev_type == PCIE_TYPE_SWITCH_DNPORT &&
+ pd->parent && pd->parent->dev_type == PCIE_TYPE_SWITCH_UPPORT &&
+ pd->parent->parent &&
+ pd->parent->parent->dev_type == PCIE_TYPE_ROOT_PORT &&
+ !pd->parent->parent->parent)
+ return true;
+
+ /* Anything else, bail */
+ return false;
+}
+
+void lxvpd_get_slot_info(struct phb *phb, struct pci_device *pd)
+{
+ struct lxvpd_slot_info_data *sdata = phb->platform_data;
+	bool is_phb = !(pd && pd->parent);
+ bool entry_found = false;
+ uint8_t idx;
+
+ /* Check if we have slot info */
+ if (!sdata)
+ return;
+
+ DBG("LXVPD: Get Slot Info PHB%d pd=%x\n", phb->opal_id,
+ pd ? pd->bdfn : 0);
+
+ /*
+ * This code only handles PHBs and PCIe switches at the top level.
+ *
+ * We do not handle any other switch nor any other type of PCI/PCI-X
+ * bridge.
+ */
+ if (!lxvpd_supported_slot(phb, pd)) {
+ DBG("LXVPD: Unsupported slot\n");
+ return;
+ }
+
+ /* Iterate the slot map */
+	for (idx = 0; idx < sdata->num_slots; idx++) {
+		struct pci_slot_info *info = &sdata->info[idx];
+		uint8_t pd_dev = pd ? ((pd->bdfn >> 3) & 0x1f) : 0;
+
+ /* Match PHB with switch_id == 0 */
+ if (is_phb && info->switch_id == 0) {
+ entry_found = true;
+ break;
+ }
+
+ /* Match switch port with switch_id != 0 */
+		if (!is_phb && info->switch_id != 0 && info->dev_id == pd_dev) {
+ entry_found = true;
+ break;
+ }
+ }
+
+ if (entry_found) {
+ pd->slot_info = &sdata->info[idx];
+ DBG("PCI: PCIE Slot Info: \n");
+ DBG(" Label %s\n", pd->slot_info->label);
+ DBG(" Pluggable 0x%x\n", pd->slot_info->pluggable?1:0);
+ DBG(" Power Ctl 0x%x\n", pd->slot_info->power_ctl?1:0);
+ DBG(" Wired Lanes 0x%x\n", pd->slot_info->wired_lanes);
+ DBG(" Bus Clock 0x%x\n", pd->slot_info->bus_clock);
+ DBG(" Connector 0x%x\n", pd->slot_info->connector_type);
+ DBG(" Slot Index %d\n", pd->slot_info->slot_index);
+ } else
+ DBG("PCI: PCIE Slot Info Not Found\n");
+}
+
+static struct pci_slot_info *lxvpd_alloc_slot_info(struct phb *phb, int count)
+{
+ struct lxvpd_slot_info_data *data;
+
+	data = zalloc(sizeof(struct lxvpd_slot_info_data) +
+		      count * sizeof(struct pci_slot_info));
+ assert(data);
+ data->num_slots = count;
+ phb->platform_data = data;
+
+ return data->info;
+}
+
+static void lxvpd_parse_1004_map(struct phb *phb, const uint8_t *sm, uint8_t sz)
+{
+ const struct pci_slot_entry_1004 *entry = NULL;
+ struct pci_slot_info *slot_info, *info;
+ uint8_t num_slots, slot, idx;
+
+ num_slots = (sz / sizeof(struct pci_slot_entry_1004));
+ slot_info = lxvpd_alloc_slot_info(phb, num_slots);
+
+ /* Iterate thru the entries in the keyword */
+ entry = (const struct pci_slot_entry_1004 *)sm;
+ for (slot = 0; slot < num_slots; slot++) {
+ info = &slot_info[slot];
+
+ /* Put slot info into pci device structure */
+ info->switch_id = entry->pba >> 4;
+		info->vswitch_id = entry->pba & 0xf;
+ info->dev_id = entry->sba;
+ for (idx = 0; idx < 3; idx++)
+ info->label[idx] = entry->label[idx];
+ info->label[3] = 0;
+ info->pluggable = ((entry->p0.byte & 0x20) == 0);
+		info->power_ctl = ((entry->p0.byte & 0x40) != 0);
+ switch(entry->p1.wired_lanes) {
+ case 1: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIX_32; break;
+ case 2: /* fall through */
+ case 3: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIX_64; break;
+ case 4: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X1; break;
+ case 5: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X4; break;
+ case 6: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X8; break;
+ case 7: info->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X16; break;
+ default:
+ info->wired_lanes = PCI_SLOT_WIRED_LANES_UNKNOWN;
+ }
+ info->bus_clock = (entry->p2.bus_clock - 4);
+ info->connector_type = (entry->p2.connector_type - 5);
+ if (entry->p3.byte < 0xC0)
+			info->card_desc = ((entry->p3.byte >> 6) - 4);
+ else
+ info->card_desc = (entry->p3.byte >> 6);
+ info->card_mech = ((entry->p3.byte >> 4) & 0x3);
+ info->pwr_led_ctl = ((entry->p3.byte & 0xF) >> 2);
+ info->attn_led_ctl = (entry->p3.byte & 0x3);
+ info->slot_index = entry->slot_index;
+ entry++;
+ }
+}
+
+static void lxvpd_parse_1005_map(struct phb *phb, const uint8_t *sm, uint8_t sz)
+{
+ const struct pci_slot_entry_1005 *entry = NULL;
+ struct pci_slot_info *slot_info, *info;
+ uint8_t num_slots, slot, idx;
+
+ num_slots = (sz / sizeof(struct pci_slot_entry_1005));
+ slot_info = lxvpd_alloc_slot_info(phb, num_slots);
+
+ /* Iterate thru the entries in the keyword */
+ entry = (const struct pci_slot_entry_1005 *)sm;
+ for (slot = 0; slot < num_slots; slot++) {
+ info = &slot_info[slot];
+
+ /* Put slot info into pci device structure */
+ info->switch_id = entry->pba >> 4;
+		info->vswitch_id = entry->pba & 0xf;
+ info->dev_id = entry->switch_device_id;
+ for (idx = 0; idx < 8; idx++)
+ info->label[idx] = entry->label[idx];
+ info->label[8] = 0;
+ info->pluggable = (entry->p0.pluggable == 0);
+ info->power_ctl = entry->p0.power_ctl;
+ info->wired_lanes = entry->p1.wired_lanes;
+ if (info->wired_lanes > PCI_SLOT_WIRED_LANES_PCIE_X32)
+ info->wired_lanes = PCI_SLOT_WIRED_LANES_UNKNOWN;
+ info->bus_clock = entry->p2.bus_clock;
+ info->connector_type = entry->p2.connector_type;
+ info->card_desc = (entry->p3.byte >> 6);
+ info->card_mech = ((entry->p3.byte >> 4) & 0x3);
+ info->pwr_led_ctl = ((entry->p3.byte & 0xF) >> 2);
+ info->attn_led_ctl = (entry->p3.byte & 0x3);
+ info->slot_index = entry->slot_index;
+ entry++;
+ }
+}
+
+void lxvpd_process_slot_entries(struct phb *phb,
+ struct dt_node *node,
+ uint8_t chip_id,
+ uint8_t index)
+{
+ const void *lxvpd;
+ const uint8_t *pr_rec, *pr_end, *sm;
+ size_t lxvpd_size, pr_size;
+ const uint16_t *mf = NULL;
+ char record[5] = "PR00";
+ uint8_t mf_sz, sm_sz;
+ bool found = false;
+
+ record[2] += chip_id;
+ record[3] += index;
+ record[4] = 0;
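+	/* e.g. chip_id 1 and index 2 turn "PR00" into the record name "PR12" */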
+
+ /* Get LX VPD pointer */
+ lxvpd = dt_prop_get_def_size(node, "ibm,io-vpd", NULL, &lxvpd_size);
+ if (!lxvpd) {
+ printf("LXVPD: LX VPD not found for %s in %p\n",
+ record, phb->dt_node);
+ return;
+ }
+
+ pr_rec = vpd_find_record(lxvpd, lxvpd_size, record, &pr_size);
+ if (!pr_rec) {
+ printf("LXVPD: %s record not found in LX VPD in %p\n",
+ record, phb->dt_node);
+ return;
+ }
+ pr_end = pr_rec + pr_size;
+
+ DBG("LXVPD: %s record for PHB%d is %ld bytes\n",
+ record, phb->opal_id, pr_size);
+
+ /* As long as there's still something in the PRxy record... */
+ while(pr_rec < pr_end) {
+ pr_size = pr_end - pr_rec;
+
+ /* Find the next MF keyword */
+ mf = vpd_find_keyword(pr_rec, pr_size, "MF", &mf_sz);
+ /* And the corresponding SM */
+ sm = vpd_find_keyword(pr_rec, pr_size, "SM", &sm_sz);
+ if (!mf || !sm) {
+ if (!found)
+ printf("LXVPD: Slot Map keyword %s not found\n",
+ record);
+ return;
+ }
+ DBG("LXVPD: Found 0x%04x map...\n", *mf);
+
+ switch (*mf) {
+ case 0x1004:
+ lxvpd_parse_1004_map(phb, sm + 1, sm_sz - 1);
+ found = true;
+ break;
+ case 0x1005:
+ lxvpd_parse_1005_map(phb, sm + 1, sm_sz - 1);
+ found = true;
+ break;
+ /* Add support for 0x1006 maps ... */
+ }
+ pr_rec = sm + sm_sz;
+ }
+}
+
diff --git a/platforms/ibm-fsp/lxvpd.h b/platforms/ibm-fsp/lxvpd.h
new file mode 100644
index 0000000..ce70502
--- /dev/null
+++ b/platforms/ibm-fsp/lxvpd.h
@@ -0,0 +1,111 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LXVPD_H
+#define __LXVPD_H
+
+/* P8 PCI Slot Entry Definitions -- 1005 */
+
+struct slot_p0 {
+ union {
+ uint8_t byte;
+ struct {
+ uint8_t pluggable:1;
+ uint8_t pluggable_location:3;
+ uint8_t power_ctl:1;
+ uint8_t rsvd_5:1;
+ uint8_t upstream_port:1;
+ uint8_t alt_load_source:1;
+ };
+ };
+};
+
+struct slot_p1 {
+ uint8_t rsvd_0:1;
+ uint8_t wired_lanes:3;
+ uint8_t rsvd_4:4;
+};
+
+struct slot_p2 {
+ uint8_t rsvd_0:1;
+ uint8_t bus_clock:3;
+ uint8_t connector_type:4;
+};
+
+struct slot_p3 {
+ union {
+ uint8_t byte;
+ struct {
+ uint8_t height:1;
+ uint8_t length:1;
+ uint8_t left_mech:1;
+ uint8_t right_mech:1;
+ uint8_t pow_led_kvm:1;
+ uint8_t pow_led_fsp:1;
+ uint8_t attn_led_kvm:1;
+ uint8_t attn_led_fsp:1;
+ };
+ };
+};
+
+struct pci_slot_entry_1004 {
+ uint8_t pba;
+ uint8_t sba;
+ uint8_t phb_or_slot_type;
+ char label[3];
+ uint16_t bis;
+ struct slot_p0 p0;
+ struct slot_p1 p1;
+ struct slot_p2 p2;
+ struct slot_p3 p3;
+ uint8_t left_pitch;
+ uint8_t right_pitch;
+ uint8_t slot_index;
+ uint8_t max_slot_power;
+};
+
+struct pci_slot_entry_1005 {
+ union {
+ uint8_t pba;
+ struct {
+ uint8_t switch_id:4;
+ uint8_t vswitch_id:4;
+ };
+ };
+ uint8_t switch_device_id;
+ uint8_t slot_type:4;
+ uint8_t phb_id:4;
+ char label[8];
+ uint8_t rsvd_11[4];
+ struct slot_p0 p0;
+ struct slot_p1 p1;
+ struct slot_p2 p2;
+ struct slot_p3 p3;
+ uint8_t left_pitch;
+ uint8_t right_pitch;
+ uint8_t slot_index;
+ uint8_t rsvd_22[2];
+};
+
+struct phb;
+
+extern void lxvpd_process_slot_entries(struct phb *phb,
+ struct dt_node *node,
+ uint8_t chip_id, uint8_t index);
+
+extern void lxvpd_get_slot_info(struct phb *phb, struct pci_device *pd);
+
+#endif /* __LXVPD_H */
diff --git a/platforms/rhesus/Makefile.inc b/platforms/rhesus/Makefile.inc
new file mode 100644
index 0000000..5899ca2
--- /dev/null
+++ b/platforms/rhesus/Makefile.inc
@@ -0,0 +1,6 @@
+SUBDIRS += $(PLATDIR)/rhesus
+
+RHESUS_OBJS = rhesus.o
+RHESUS = $(PLATDIR)/rhesus/built-in.o
+$(RHESUS): $(RHESUS_OBJS:%=$(PLATDIR)/rhesus/%)
+
diff --git a/platforms/rhesus/rhesus.c b/platforms/rhesus/rhesus.c
new file mode 100644
index 0000000..41b1d5a
--- /dev/null
+++ b/platforms/rhesus/rhesus.c
@@ -0,0 +1,313 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <skiboot.h>
+#include <device.h>
+#include <lpc.h>
+#include <console.h>
+#include <opal.h>
+#include <libflash/libflash.h>
+#include <libflash/libffs.h>
+#include <sfc-ctrl.h>
+#include <ec/config.h>
+#include <ec/gpio.h>
+
+/*
+ * EC GPIO mapping
+ */
+#define RHESUS_RST_UCD90160_N EC_GPIO_PORT_J, 3
+#define RHESUS_FM_PWR_CYCLE_N EC_GPIO_PORT_K, 2
+#define RHESUS_EN_PWR_ON_SEQ EC_GPIO_PORT_R, 1
+#define RHESUS_BOARD_REVISION0 EC_GPIO_PORT_F, 3
+#define RHESUS_BOARD_REVISION1 EC_GPIO_PORT_F, 2
+#define RHESUS_BOARD_REVISION2 EC_GPIO_PORT_E, 5
+#define RHESUS_BOARD_REVISION3 EC_GPIO_PORT_E, 4
+#define RHESUS_BOARD_REVISION4 EC_GPIO_PORT_E, 1
+
+static struct spi_flash_ctrl *pnor_ctrl;
+static struct flash_chip *pnor_chip;
+static struct ffs_handle *pnor_ffs;
+
+
+/*
+ * IO accessors for the EC driver
+ */
+void ec_outb(uint16_t addr, uint8_t data)
+{
+ lpc_outb(data, addr);
+}
+
+uint8_t ec_inb(uint16_t addr)
+{
+ return lpc_inb(addr);
+}
+
+static int rhesus_board_revision(void)
+{
+ int revision = 0, ret = 0, i = 0;
+
+ static const struct {
+ EcGpioPort port;
+ uint8_t pin;
+ } revision_gpios[] = {
+ { RHESUS_BOARD_REVISION0 },
+ { RHESUS_BOARD_REVISION1 },
+ { RHESUS_BOARD_REVISION2 },
+ { RHESUS_BOARD_REVISION3 },
+ { RHESUS_BOARD_REVISION4 },
+ };
+ for (i = 0; i < sizeof(revision_gpios) / sizeof(revision_gpios[0]); ++i)
+ {
+ ret = ec_gpio_read(revision_gpios[i].port, revision_gpios[i].pin);
+ if (ret < 0)
+ return ret;
+		revision <<= 1;
+		revision |= ret;
+ }
+
+ return revision;
+}
+
+static int64_t rhesus_reboot(void)
+{
+ // TODO(rlippert): This should use EC_SYS_RST_N, but there is nothing to
+	// deassert it at the moment.
+ int ret = 0;
+ ret = ec_gpio_set(RHESUS_FM_PWR_CYCLE_N, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = ec_gpio_setup(RHESUS_FM_PWR_CYCLE_N,
+ EC_GPIO_OUTPUT,
+ EC_GPIO_PULLUP_DISABLE);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int64_t rhesus_power_down(uint64_t request __unused)
+{
+ int ret = 0;
+ ret = ec_gpio_set(RHESUS_EN_PWR_ON_SEQ, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = ec_gpio_setup(RHESUS_EN_PWR_ON_SEQ,
+ EC_GPIO_OUTPUT,
+ EC_GPIO_PULLUP_DISABLE);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rhesus_pnor_init(void)
+{
+ uint32_t nv_part, nv_start, nv_size;
+ int rc;
+
+ /* Open controller, flash and ffs */
+ rc = sfc_open(&pnor_ctrl);
+ if (rc) {
+ prerror("PLAT: Failed to open PNOR flash controller\n");
+ goto fail;
+ }
+ rc = flash_init(pnor_ctrl, &pnor_chip);
+ if (rc) {
+ prerror("PLAT: Failed to open init PNOR driver\n");
+ goto fail;
+ }
+ rc = ffs_open_flash(pnor_chip, 0, 0, &pnor_ffs);
+ if (rc) {
+ prerror("PLAT: Failed to parse FFS partition map\n");
+ goto fail;
+ }
+
+ /*
+ * Grab NVRAM and initialize the flash_nvram module
+ *
+	 * Note: ignore the actual size for now ... some images have
+	 * it set up incorrectly.
+ */
+ rc = ffs_lookup_part(pnor_ffs, "NVRAM", &nv_part);
+ if (rc) {
+ prerror("PLAT: No NVRAM partition in PNOR\n");
+ return OPAL_HARDWARE;
+ }
+ rc = ffs_part_info(pnor_ffs, nv_part, NULL,
+ &nv_start, &nv_size, NULL);
+ if (rc) {
+ prerror("PLAT: Failed to get NVRAM partition info\n");
+ return OPAL_HARDWARE;
+ }
+ flash_nvram_init(pnor_chip, nv_start, nv_size);
+
+ return 0;
+ fail:
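+	/*
+	 * Unwind in reverse order of acquisition, clearing each pointer
+	 * so a failed init leaves no stale state behind.
+	 */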
+ if (pnor_ffs)
+ ffs_close(pnor_ffs);
+ pnor_ffs = NULL;
+ if (pnor_chip)
+ flash_exit(pnor_chip);
+ pnor_chip = NULL;
+ if (pnor_ctrl)
+ sfc_close(pnor_ctrl);
+ pnor_ctrl = NULL;
+
+ return rc;
+}
+
+static void rhesus_init(void)
+{
+ if (dummy_console_enabled())
+ dummy_console_add_nodes();
+
+ /* Initialize PNOR/NVRAM */
+ rhesus_pnor_init();
+}
+
+static void rhesus_dt_fixup_uart(struct dt_node *lpc)
+{
+	/*
+	 * The official OF ISA/LPC binding is a bit odd: it prefixes
+	 * the unit address for IO with "i". It uses 2 cells, the first
+	 * one indicating IO vs. Memory space (along with bits to
+	 * represent aliasing).
+	 *
+	 * We pick up that binding and add "2" to it as an indication
+	 * of FW space.
+	 *
+	 * TODO: Probe the UART instead, if the LPC bus allows for it.
+	 */
+ struct dt_node *uart;
+ char namebuf[32];
+#define UART_IO_BASE 0x3f8
+#define UART_IO_COUNT 8
+
+ sprintf(namebuf, "serial@i%x", UART_IO_BASE);
+ uart = dt_new(lpc, namebuf);
+
+ dt_add_property_cells(uart, "reg",
+ 1, /* IO space */
+ UART_IO_BASE, UART_IO_COUNT);
+ dt_add_property_strings(uart, "compatible",
+ "ns16550",
+ "pnpPNP,501");
+ dt_add_property_cells(uart, "clock-frequency", 1843200);
+ dt_add_property_cells(uart, "current-speed", 115200);
+
+	/*
+	 * This is needed by Linux for some obscure reasons; we'll
+	 * eventually need to sanitize it, but in the meantime let's
+	 * make sure it's there.
+	 */
+ dt_add_property_strings(uart, "device_type", "serial");
+
+	/*
+	 * Add the interrupt. This simulates what would come from
+	 * HostBoot, which does not know our interrupt numbering
+	 * scheme. Instead, it just tells us which chip the interrupt
+	 * is wired to; it will be the PSI "host error" interrupt of
+	 * that chip. For now we assume it is the same chip the LPC
+	 * bus is on.
+	 */
+ dt_add_property_cells(uart, "ibm,irq-chip-id", dt_get_chip_id(lpc));
+}
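+
+/*
+ * For reference, the node generated above (with the 0x3f8 base defined
+ * here) looks like:
+ *
+ *	serial@i3f8 {
+ *		reg = <1 0x3f8 8>;
+ *		compatible = "ns16550", "pnpPNP,501";
+ *		clock-frequency = <1843200>;
+ *		current-speed = <115200>;
+ *		device_type = "serial";
+ *		ibm,irq-chip-id = <...>;
+ *	};
+ */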
+
+/*
+ * This adds the legacy RTC device to the device-tree
+ * for Linux to use
+ */
+static void rhesus_dt_fixup_rtc(struct dt_node *lpc)
+{
+ struct dt_node *rtc;
+
+ /*
+ * Follows the structure expected by the kernel file
+ * arch/powerpc/sysdev/rtc_cmos_setup.c
+ */
+ rtc = dt_new_addr(lpc, "rtc", EC_RTC_PORT_BASE);
+ dt_add_property_string(rtc, "compatible", "pnpPNP,b00");
+ dt_add_property_cells(rtc, "reg",
+ 1, /* IO space */
+ EC_RTC_PORT_BASE,
+ /* 1 index/data pair per 128 bytes */
+ (EC_RTC_BLOCK_SIZE / 128) * 2);
+}
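+
+/*
+ * Worked example for the "reg" length above: EC_RTC_BLOCK_SIZE is
+ * EC-specific (256 here is purely illustrative); a 256-byte block
+ * needs 256 / 128 = 2 index/data pairs, i.e. a 4-port IO window.
+ */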
+
+static void rhesus_dt_fixup(void)
+{
+ struct dt_node *n, *primary_lpc = NULL;
+
+ /* Find the primary LPC bus */
+ dt_for_each_compatible(dt_root, n, "ibm,power8-lpc") {
+ if (!primary_lpc || dt_has_node_property(n, "primary", NULL))
+ primary_lpc = n;
+ if (dt_has_node_property(n, "#address-cells", NULL))
+ break;
+ }
+
+ if (!primary_lpc)
+ return;
+
+ rhesus_dt_fixup_rtc(primary_lpc);
+ rhesus_dt_fixup_uart(primary_lpc);
+}
+
+static bool rhesus_probe(void)
+{
+ const char *model;
+ int rev;
+
+ if (!dt_node_is_compatible(dt_root, "ibm,powernv"))
+ return false;
+
+ model = dt_prop_get_def(dt_root, "model", NULL);
+ if (!model || !(strstr(model, "rhesus") || strstr(model, "RHESUS")))
+ return false;
+
+ /* Grab board version from EC */
+ rev = rhesus_board_revision();
+ if (rev >= 0) {
+ printf("Rhesus board rev %d\n", rev);
+ dt_add_property_cells(dt_root, "revision-id", rev);
+ } else
+ prerror("Rhesus board revision not found !\n");
+
+ /* Add missing bits of device-tree such as the UART */
+ rhesus_dt_fixup();
+
+	/*
+	 * Set up the UART and use it as the console. For now, we
+	 * don't expose the interrupt, as we know it's not working
+	 * properly yet.
+	 */
+ uart_init(false);
+
+ return true;
+}
+
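+/*
+ * DECLARE_PLATFORM places this descriptor in the .platforms section;
+ * the linker script collects every such descriptor between
+ * __platforms_start and __platforms_end, and boot code walks that
+ * table, calling each probe() until one claims the machine.
+ */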
+DECLARE_PLATFORM(rhesus) = {
+ .name = "Rhesus",
+ .probe = rhesus_probe,
+ .init = rhesus_init,
+ .cec_power_down = rhesus_power_down,
+ .cec_reboot = rhesus_reboot,
+};
diff --git a/skiboot.lds.S b/skiboot.lds.S
new file mode 100644
index 0000000..cbc8b85
--- /dev/null
+++ b/skiboot.lds.S
@@ -0,0 +1,143 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include <mem-map.h>
+
+ENTRY(boot_entry);
+SECTIONS
+{
+ . = 0;
+
+ .head : {
+ KEEP(*(.head))
+ }
+
+ . = SPIRA_OFF;
+ .spira : {
+ KEEP(*(.spira.data))
+ }
+
+ . = PROCIN_OFF;
+ .procin.data : {
+ KEEP(*(.procin.data))
+ }
+
+ . = MDST_TABLE_OFF;
+ .mdst : {
+ KEEP(*(.mdst.data))
+ }
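+
+	/*
+	 * SPIRA_OFF, PROCIN_OFF and MDST_TABLE_OFF come from mem-map.h;
+	 * these tables live at fixed offsets because the other firmware
+	 * components locate them by address rather than by symbol.
+	 */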
+
+ . = ALIGN(0x10);
+ .text : {
+ *(.text*)
+ *(.sfpr)
+ }
+
+ .rodata : {
+ __rodata_start = .;
+ *(.rodata .rodata.*)
+ __rodata_end = .;
+ }
+
+ .data : {
+ /*
+ * A couple of things that need to be 4K aligned and
+ * to reside in their own pages for the sake of TCE
+ * mappings
+ */
+ . = ALIGN(0x1000);
+ *(.data.memcons);
+ . = ALIGN(0x1000);
+ *(.data.boot_trace);
+ . = ALIGN(0x1000);
+ *(.data*)
+ *(.force.data)
+ *(.toc1)
+ *(.branch_lt)
+ }
+
+ . = ALIGN(0x10);
+ .opd : {
+ *(.opd)
+ }
+
+ . = ALIGN(0x10);
+ .got : {
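+		/*
+		 * Standard ppc64 convention: the TOC pointer is biased
+		 * 0x8000 into .got/.toc so that 16-bit signed offsets
+		 * can reach the whole 64KB region.
+		 */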
+ __toc_start = . + 0x8000;
+ *(.got)
+ *(.toc)
+ }
+
+ . = ALIGN(0x10);
+ .opal_table : {
+ __opal_table_start = .;
+ KEEP(*(.opal_table))
+ __opal_table_end = .;
+ }
+
+ .platforms : {
+ __platforms_start = .;
+ KEEP(*(.platforms))
+ __platforms_end = .;
+ }
+
+	/* Do we need to keep these? */
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+
+ /* Relocations */
+ . = ALIGN(0x10);
+ .dynamic : {
+ __dynamic_start = .;
+ *(.dynamic)
+ __dynamic_end = .;
+ }
+
+ . = ALIGN(0x10);
+ .rela.dyn : {
+ __rela_dyn_start = .;
+ *(.rela*)
+ __rela_dyn_end = .;
+ }
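+	/*
+	 * __rela_dyn_start/__rela_dyn_end delimit the relocation entries
+	 * that early boot (presumably in asm/head.S) applies so skiboot
+	 * can run from whatever address it was loaded at.
+	 */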
+
+	.hash : { *(.hash) }
+
+ . = ALIGN(0x1000);
+ _sbss = .;
+ .bss : {
+ *(.bss*)
+ }
+ . = ALIGN(0x10000);
+ _ebss = .;
+ _end = .;
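+
+	/*
+	 * _sbss/_ebss bracket the region that early boot zeroes before
+	 * any C code runs; _end marks the end of the image proper,
+	 * excluding the optional built-in kernel below.
+	 */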
+
+ /* Optional kernel image */
+ . = ALIGN(0x10000);
+ .builtin_kernel : {
+ __builtin_kernel_start = .;
+ KEEP(*(.builtin_kernel))
+ __builtin_kernel_end = .;
+ }
+
+ /* Discards */
+ /DISCARD/ : {
+ *(.comment)
+ *(.interp)
+ }
+}