aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlistair Popple <alistair@popple.id.au>2014-10-20 11:42:54 +1100
committerStewart Smith <stewart@linux.vnet.ibm.com>2014-10-22 18:00:10 +1100
commitcf6f4e8912d29fb89ce85c84834607065ad595a5 (patch)
tree4649ba458e1a9e1726135304ac135320b612d268
parentde6c2a5ad4baf0a45e72ba82d3ba87446e68768c (diff)
downloadskiboot-cf6f4e8912d29fb89ce85c84834607065ad595a5.zip
skiboot-cf6f4e8912d29fb89ce85c84834607065ad595a5.tar.gz
skiboot-cf6f4e8912d29fb89ce85c84834607065ad595a5.tar.bz2
fsp/elog: Create a logging frontend
In order to support fsp-less machines we need to be able to log errors using a BMC or some other mechanism. Currently the error logging code is tightly coupled to the platform making it difficult to add different platforms. This patch factors out the generic parts of the error logging code in preparation for adding different logging backends. It also adds a generic mechanism for pre-allocating a specific number of objects. Signed-off-by: Alistair Popple <alistair@popple.id.au> Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
-rw-r--r--.gitignore1
-rw-r--r--core/Makefile.inc2
-rw-r--r--core/errorlog.c176
-rw-r--r--core/pool.c80
-rw-r--r--core/test/Makefile.check2
-rw-r--r--core/test/run-pool.c54
-rw-r--r--hw/fsp/fsp-elog-write.c201
-rw-r--r--include/errorlog.h41
-rw-r--r--include/fsp-elog.h10
-rw-r--r--include/pel.h2
-rw-r--r--include/pool.h21
11 files changed, 384 insertions, 206 deletions
diff --git a/.gitignore b/.gitignore
index cc55710..357b24e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@ core/test/run-mem_region_release_unused
core/test/run-mem_region_release_unused_noalloc
core/test/run-msg
core/test/run-pel
+core/test/run-pool
core/test/run-trace
core/test/*-gcov
external/dump_trace
diff --git a/core/Makefile.inc b/core/Makefile.inc
index 8b9a03b..c848c8e 100644
--- a/core/Makefile.inc
+++ b/core/Makefile.inc
@@ -6,7 +6,7 @@ CORE_OBJS += malloc.o lock.o cpu.o utils.o fdt.o opal.o interrupts.o
CORE_OBJS += timebase.o opal-msg.o pci.o pci-opal.o fast-reboot.o
CORE_OBJS += device.o exceptions.o trace.o affinity.o vpd.o
CORE_OBJS += hostservices.o platform.o nvram.o flash-nvram.o hmi.o
-CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o
+CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o pool.o errorlog.o
CORE=core/built-in.o
$(CORE): $(CORE_OBJS:%=core/%)
diff --git a/core/errorlog.c b/core/errorlog.c
new file mode 100644
index 0000000..8867a8e
--- /dev/null
+++ b/core/errorlog.c
@@ -0,0 +1,176 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains the front end for OPAL error logging. It is used
+ * to construct a struct errorlog representing the event/error to be
+ * logged which is then passed to the platform specific backend to log
+ * the actual errors.
+ */
+#include <skiboot.h>
+#include <lock.h>
+#include <errorlog.h>
+#include <fsp-elog.h>
+#include <pool.h>
+
+/*
+ * Maximum number of buffers that are pre-allocated
+ * to hold elogs that are reported on Sapphire and
+ * powernv.
+ */
+#define ELOG_WRITE_MAX_RECORD 64
+
+/* Platform Log ID as per the spec */
+static uint32_t sapphire_elog_id = 0xB0000000;
+/* Reserved for future use */
+/* static uint32_t powernv_elog_id = 0xB1000000; */
+
+/* Pool to allocate elog messages from */
+static struct pool elog_pool;
+static struct lock elog_lock = LOCK_UNLOCKED;
+
+/* Fetch a free errorlog buffer from the pre-allocated pool.
+ *
+ * PANIC-severity events may draw from the reserved (POOL_HIGH) part
+ * of the pool so a fatal error can still be logged after the normal
+ * allocations are exhausted.  Returns NULL if nothing is available
+ * at the requested priority.
+ */
+static struct errorlog *get_write_buffer(int opal_event_severity)
+{
+ struct errorlog *buf;
+
+ lock(&elog_lock);
+ if (opal_event_severity == OPAL_ERROR_PANIC)
+ buf = pool_get(&elog_pool, POOL_HIGH);
+ else
+ buf = pool_get(&elog_pool, POOL_NORMAL);
+ unlock(&elog_lock);
+ return buf;
+}
+
+/* Append a tagged user-data section to an errorlog's dump area.
+ *
+ * @buf:  errorlog being built (must be non-NULL)
+ * @data: payload bytes copied into the new section
+ * @tag:  caller-chosen identifier stored in the section header
+ * @size: number of payload bytes in @data
+ *
+ * Returns 0 on success, -1 if @buf is NULL or the dump area would
+ * exceed OPAL_LOG_MAX_DUMP.
+ *
+ * NOTE(review): the overflow check only accounts for @size, not the
+ * section header (sizeof(struct elog_user_data_section) - 1) that is
+ * also written — a section near the limit could overrun; verify.
+ */
+int opal_elog_update_user_dump(struct errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size)
+{
+ char *buffer;
+ struct elog_user_data_section *tmp;
+
+ if (!buf) {
+ prerror("ELOG: Cannot update user data. Buffer is invalid\n");
+ return -1;
+ }
+
+ /* New section starts where the previous one ended */
+ buffer = (char *)buf->user_data_dump + buf->user_section_size;
+ if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) {
+ prerror("ELOG: Size of dump data overruns buffer\n");
+ return -1;
+ }
+
+ tmp = (struct elog_user_data_section *)buffer;
+ tmp->tag = tag;
+ /* Section size = header + payload (data_dump overlaps by one byte) */
+ tmp->size = size + sizeof(struct elog_user_data_section) - 1;
+ memcpy(tmp->data_dump, data, size);
+
+ buf->user_section_size += tmp->size;
+ buf->user_section_count++;
+ return 0;
+}
+
+/* Reporting of error via struct errorlog */
+/* Allocate and pre-fill an errorlog from the static fields of
+ * @e_info.  The platform log ID (plid) is taken from the
+ * monotonically increasing sapphire_elog_id counter under elog_lock.
+ * Returns NULL when no pool buffer is available for the event's
+ * severity. */
+struct errorlog *opal_elog_create(struct opal_err_info *e_info)
+{
+ struct errorlog *buf;
+
+ buf = get_write_buffer(e_info->sev);
+ if (buf) {
+ buf->error_event_type = e_info->err_type;
+ buf->component_id = e_info->cmp_id;
+ buf->subsystem_id = e_info->subsystem;
+ buf->event_severity = e_info->sev;
+ buf->event_subtype = e_info->event_subtype;
+ buf->reason_code = e_info->reason_code;
+ buf->elog_origin = ORG_SAPPHIRE;
+
+ lock(&elog_lock);
+ buf->plid = ++sapphire_elog_id;
+ unlock(&elog_lock);
+ }
+
+ return buf;
+}
+
+/* Called by the backend when it has finished with @buf.  Prints a
+ * message if the backend failed to log the error (@success false),
+ * then returns the buffer to the pool in either case. */
+void opal_elog_complete(struct errorlog *buf, bool success)
+{
+ if (!success)
+ printf("Unable to log error\n");
+
+ lock(&elog_lock);
+ pool_free_object(&elog_pool, buf);
+ unlock(&elog_lock);
+}
+
+/* Format and log an error described by @e_info.
+ *
+ * The printf-style message (truncated to the 250-byte err_msg buffer)
+ * is printed on the Sapphire console, attached to a new errorlog as a
+ * "DESC" user-data section, optionally extended with call-out dumps
+ * via @e_info->call_out(buf, data, size), and finally committed to
+ * the FSP.
+ *
+ * NOTE(review): err_msg is char[] while opal_elog_update_user_dump()
+ * takes unsigned char * — relies on an implicit pointer conversion;
+ * confirm this builds warning-free.
+ */
+void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
+ const char *fmt, ...)
+{
+ struct errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+ /* Log the error on to Sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ /* Append any number of call out dumps */
+ if (e_info->call_out)
+ e_info->call_out(buf, data, size);
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
+
+/* Same as log_error() but without the call-out dump step: format the
+ * message, print it on the Sapphire console, attach it to a new
+ * errorlog as a "DESC" user-data section and commit to the FSP. */
+void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
+{
+ struct errorlog *buf;
+ int tag = 0x44455343; /* ASCII of DESC */
+ va_list list;
+ char err_msg[250];
+
+ va_start(list, fmt);
+ vsnprintf(err_msg, sizeof(err_msg), fmt, list);
+ va_end(list);
+
+ /* Log the error on to Sapphire console */
+ prerror("%s", err_msg);
+
+ buf = opal_elog_create(e_info);
+ if (buf == NULL)
+ prerror("ELOG: Error getting buffer to log error\n");
+ else {
+ opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
+ if (elog_fsp_commit(buf))
+ prerror("ELOG: Re-try error logging\n");
+ }
+}
+
+/* Pre-allocate the pool of errorlog records (ELOG_WRITE_MAX_RECORD
+ * entries, one of them reserved for the PANIC path).  Returns
+ * OPAL_RESOURCE if the pool allocation fails, 0 otherwise. */
+int elog_init(void)
+{
+ /* pre-allocate memory for records */
+ if (pool_init(&elog_pool, sizeof(struct errorlog), ELOG_WRITE_MAX_RECORD, 1))
+ return OPAL_RESOURCE;
+
+ return 0;
+}
diff --git a/core/pool.c b/core/pool.c
new file mode 100644
index 0000000..adf6c90
--- /dev/null
+++ b/core/pool.c
@@ -0,0 +1,80 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file provides some functions to manage a pool of pre-allocated
+ * objects. It also provides a method to reserve a pre-defined number
+ * of objects for higher priority requests. The allocations follow the
+ * following rules:
+ *
+ * 1. An allocation will succeed at any priority if there is more than
+ * the reserved number of objects free.
+ * 2. Only high priority allocations will succeed when there are less
+ * than the reserved number of objects free.
+ * 3. When an allocation is freed it is always added to the high priority
+ * pool if there are less than the reserved number of allocations
+ * available.
+ */
+
+#include <pool.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ccan/list/list.h>
+
+/* Take one object from the pool, or return NULL.
+ *
+ * Allocation rules (see the file header): any priority succeeds while
+ * more than 'reserved' objects are free; once only the reserve
+ * remains, POOL_NORMAL requests fail and only POOL_HIGH succeeds.
+ * The returned object is zeroed.
+ *
+ * NOTE(review): assert() is used but <assert.h> is not included by
+ * this file directly — presumably pulled in via ccan/list; confirm.
+ */
+void* pool_get(struct pool *pool, enum pool_priority priority)
+{
+ void *obj;
+
+ if (!pool->free_count ||
+ ((pool->free_count <= pool->reserved) && priority == POOL_NORMAL))
+ return NULL;
+
+ pool->free_count--;
+ obj = (void *) list_pop_(&pool->free_list, 0);
+ assert(obj);
+ memset(obj, 0, pool->obj_size);
+ return obj;
+}
+
+/* Return @obj to @pool's free list.  The object's own storage is
+ * reused as the list node; contents are not cleared here (pool_get()
+ * zeroes objects on allocation).  Freed objects replenish the reserve
+ * automatically, since allocation only compares free_count against
+ * 'reserved'.  No internal locking — callers serialise access. */
+void pool_free_object(struct pool *pool, void *obj)
+{
+ pool->free_count++;
+ list_add_tail(&pool->free_list,
+ (struct list_node *) (obj));
+}
+
+/* Initialise @pool with @count pre-allocated objects of @obj_size
+ * bytes each, @reserved of which are held back for POOL_HIGH
+ * requests.
+ *
+ * obj_size is rounded up to sizeof(struct list_node) because free
+ * objects double as their own free-list nodes.  All objects come from
+ * a single contiguous malloc'd buffer.  Returns 0 on success or -1 if
+ * the backing allocation fails.
+ */
+int pool_init(struct pool *pool, size_t obj_size, int count, int reserved)
+{
+ int i;
+
+ if (obj_size < sizeof(struct list_node))
+ obj_size = sizeof(struct list_node);
+
+ assert(count >= reserved);
+ pool->buf = malloc(obj_size*count);
+ if (!pool->buf)
+ return -1;
+
+ pool->obj_size = obj_size;
+ pool->free_count = count;
+ pool->reserved = reserved;
+ list_head_init(&pool->free_list);
+
+ /* Thread every object onto the free list */
+ for(i = 0; i < count; i++)
+ list_add_tail(&pool->free_list,
+ (struct list_node *) (pool->buf + obj_size*i));
+
+ return 0;
+}
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
index 3fe4530..1e5d797 100644
--- a/core/test/Makefile.check
+++ b/core/test/Makefile.check
@@ -1,5 +1,5 @@
# -*-Makefile-*-
-CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel
+CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel core/test/run-pool
check: $(CORE_TEST:%=%-check) $(CORE_TEST:%=%-gcov-run)
diff --git a/core/test/run-pool.c b/core/test/run-pool.c
new file mode 100644
index 0000000..1f81161
--- /dev/null
+++ b/core/test/run-pool.c
@@ -0,0 +1,54 @@
+/* Unit test for the pre-allocated object pool (core/pool.c).
+ *
+ * Builds a pool of POOL_OBJ_COUNT objects with POOL_RESERVED_COUNT
+ * held back for POOL_HIGH, then checks the allocation rules: normal
+ * requests fail once only the reserve remains, while high priority
+ * requests may consume the reserve.
+ */
+#include <pool.h>
+
+#include "../pool.c"
+
+#define POOL_OBJ_COUNT 10
+#define POOL_RESERVED_COUNT 2
+#define POOL_NORMAL_COUNT (POOL_OBJ_COUNT - POOL_RESERVED_COUNT)
+
+struct test_object
+{
+ int a;
+ int b;
+ int c;
+};
+
+int main(void)
+{
+ int i, count = 0;
+ struct pool pool;
+ struct test_object *a[POOL_OBJ_COUNT];
+
+ assert(!pool_init(&pool, sizeof(struct test_object), POOL_OBJ_COUNT,
+ POOL_RESERVED_COUNT));
+
+ /* A single get/free cycle works at normal priority */
+ a[0] = pool_get(&pool, POOL_NORMAL);
+ assert(a[0]);
+ pool_free_object(&pool, a[0]);
+
+ /* Drain every non-reserved object at normal priority */
+ for(i = 0; i < POOL_NORMAL_COUNT; i++)
+ {
+ a[i] = pool_get(&pool, POOL_NORMAL);
+ if (a[i])
+ count++;
+ }
+ assert(count == POOL_NORMAL_COUNT);
+
+ /* Normal pool should be exhausted */
+ assert(!pool_get(&pool, POOL_NORMAL));
+
+ /* Reserved pool should still be available */
+ a[POOL_NORMAL_COUNT] = pool_get(&pool, POOL_HIGH);
+ assert(a[POOL_NORMAL_COUNT]);
+ a[POOL_NORMAL_COUNT + 1] = pool_get(&pool, POOL_HIGH);
+ assert(a[POOL_NORMAL_COUNT + 1]);
+
+ /* Free one object; the pool is now below its reserve, so only
+ * POOL_HIGH may take it back */
+ pool_free_object(&pool, a[3]);
+
+ /* Should be a free object to get now */
+ a[3] = pool_get(&pool, POOL_HIGH);
+ assert(a[3]);
+
+ /* This exits depending on whether all tests passed */
+ return 0;
+}
diff --git a/hw/fsp/fsp-elog-write.c b/hw/fsp/fsp-elog-write.c
index 7169aa3..3a9adda 100644
--- a/hw/fsp/fsp-elog-write.c
+++ b/hw/fsp/fsp-elog-write.c
@@ -32,16 +32,9 @@
#include <fsp-elog.h>
#include <timebase.h>
#include <pel.h>
-
-/*
- * Maximum number buffers that are pre-allocated
- * to hold elogs that are reported on Sapphire and
- * powernv.
- */
-#define ELOG_WRITE_MAX_RECORD 64
+#include <pool.h>
static LIST_HEAD(elog_write_to_fsp_pending);
-static LIST_HEAD(elog_write_free);
static LIST_HEAD(elog_write_to_host_pending);
static LIST_HEAD(elog_write_to_host_processed);
@@ -49,11 +42,6 @@ static struct lock elog_write_lock = LOCK_UNLOCKED;
static struct lock elog_panic_write_lock = LOCK_UNLOCKED;
static struct lock elog_write_to_host_lock = LOCK_UNLOCKED;
-/* Platform Log ID as per the spec */
-static uint32_t sapphire_elog_id = 0xB0000000;
-/* Reserved for future use */
-/* static uint32_t powernv_elog_id = 0xB1000000; */
-
/* log buffer to copy FSP log for READ */
#define ELOG_WRITE_TO_FSP_BUFFER_SIZE 0x00040000
static void *elog_write_to_fsp_buffer;
@@ -64,8 +52,6 @@ static void *elog_panic_write_buffer;
#define ELOG_WRITE_TO_HOST_BUFFER_SIZE 0x0010000
static void *elog_write_to_host_buffer;
-struct errorlog *panic_write_buffer;
-static int panic_write_buffer_valid;
static uint32_t elog_write_retries;
/* Manipulate this only with write_lock held */
@@ -75,114 +61,6 @@ enum elog_head_state elog_write_to_host_head_state = ELOG_STATE_NONE;
/* Need forward declaration because of Circular dependency */
static int opal_send_elog_to_fsp(void);
-void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
- const char *fmt, ...)
-{
- struct errorlog *buf;
- int tag = 0x44455343; /* ASCII of DESC */
- va_list list;
- char err_msg[250];
-
- va_start(list, fmt);
- vsnprintf(err_msg, sizeof(err_msg), fmt, list);
- va_end(list);
-
- /* Log the error on to Sapphire console */
- prerror("%s", err_msg);
-
- buf = opal_elog_create(e_info);
- if (buf == NULL)
- prerror("ELOG: Error getting buffer to log error\n");
- else {
- opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
- /* Append any number of call out dumps */
- if (e_info->call_out)
- e_info->call_out(buf, data, size);
- if (elog_fsp_commit(buf))
- prerror("ELOG: Re-try error logging\n");
- }
-}
-
-
-void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
-{
- struct errorlog *buf;
- int tag = 0x44455343; /* ASCII of DESC */
- va_list list;
- char err_msg[250];
-
- va_start(list, fmt);
- vsnprintf(err_msg, sizeof(err_msg), fmt, list);
- va_end(list);
-
- /* Log the error on to Sapphire console */
- prerror("%s", err_msg);
-
- buf = opal_elog_create(e_info);
- if (buf == NULL)
- prerror("ELOG: Error getting buffer to log error\n");
- else {
- opal_elog_update_user_dump(buf, err_msg, tag, strlen(err_msg));
- if (elog_fsp_commit(buf))
- prerror("ELOG: Re-try error logging\n");
- }
-}
-
-static struct errorlog *get_write_buffer(int opal_event_severity)
-{
- struct errorlog *buf;
-
- lock(&elog_write_lock);
- if (list_empty(&elog_write_free)) {
- unlock(&elog_write_lock);
- if (opal_event_severity == OPAL_ERROR_PANIC) {
- lock(&elog_panic_write_lock);
- if (panic_write_buffer_valid == 0) {
- buf = (struct errorlog *)
- panic_write_buffer;
- panic_write_buffer_valid = 1; /* In Use */
- unlock(&elog_panic_write_lock);
- } else {
- unlock(&elog_panic_write_lock);
- prerror("ELOG: Write buffer full. Retry later\n");
- return NULL;
- }
- } else {
- prerror("ELOG: Write buffer list is full. Retry later\n");
- return NULL;
- }
- } else {
- buf = list_pop(&elog_write_free, struct errorlog, link);
- unlock(&elog_write_lock);
- }
-
- memset(buf, 0, sizeof(struct errorlog));
- return buf;
-}
-
-/* Reporting of error via struct errorlog */
-struct errorlog *opal_elog_create(struct opal_err_info *e_info)
-{
- struct errorlog *buf;
-
- buf = get_write_buffer(e_info->sev);
- if (buf) {
- buf->error_event_type = e_info->err_type;
- buf->component_id = e_info->cmp_id;
- buf->subsystem_id = e_info->subsystem;
- buf->event_severity = e_info->sev;
- buf->event_subtype = e_info->event_subtype;
- buf->reason_code = e_info->reason_code;
- buf->elog_origin = ORG_SAPPHIRE;
-
- lock(&elog_write_lock);
- buf->plid = ++sapphire_elog_id;
- unlock(&elog_write_lock);
- }
-
- return buf;
-}
-
static void remove_elog_head_entry(void)
{
struct errorlog *head, *entry;
@@ -194,7 +72,7 @@ static void remove_elog_head_entry(void)
if (head->plid == elog_plid_fsp_commit) {
entry = list_pop(&elog_write_to_fsp_pending,
struct errorlog, link);
- list_add_tail(&elog_write_free, &entry->link);
+ opal_elog_complete(entry, elog_write_retries < MAX_RETRIES);
/* Reset the counter */
elog_plid_fsp_commit = -1;
}
@@ -329,7 +207,7 @@ bool opal_elog_ack(uint64_t ack_id)
if (record->plid != ack_id)
continue;
list_del(&record->link);
- list_add(&elog_write_free, &record->link);
+ opal_elog_complete(record, true);
rc = true;
}
}
@@ -345,7 +223,7 @@ bool opal_elog_ack(uint64_t ack_id)
if (record->plid != ack_id)
continue;
list_del(&record->link);
- list_add(&elog_write_free, &record->link);
+ opal_elog_complete(record, true);
rc = true;
}
}
@@ -425,17 +303,9 @@ static int opal_push_logs_sync_to_fsp(struct errorlog *buf)
rc = (elog_msg->resp->word1 >> 8) & 0xff;
fsp_freemsg(elog_msg);
}
+ unlock(&elog_panic_write_lock);
- if ((buf == panic_write_buffer) && (panic_write_buffer_valid == 1)) {
- panic_write_buffer_valid = 0;
- unlock(&elog_panic_write_lock);
- } else {
- /* buffer got from the elog_write list , put it back */
- unlock(&elog_panic_write_lock);
- lock(&elog_write_lock);
- list_add_tail(&elog_write_free, &buf->link);
- unlock(&elog_write_lock);
- }
+ opal_elog_complete(buf, true);
return rc;
}
@@ -468,59 +338,6 @@ int elog_fsp_commit(struct errorlog *buf)
return rc;
}
-int opal_elog_update_user_dump(struct errorlog *buf, unsigned char *data,
- uint32_t tag, uint16_t size)
-{
- char *buffer;
- struct elog_user_data_section *tmp;
-
- if (!buf) {
- prerror("ELOG: Cannot update user data. Buffer is invalid\n");
- return -1;
- }
-
- buffer = (char *)buf->user_data_dump + buf->user_section_size;
- if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) {
- prerror("ELOG: Size of dump data overruns buffer\n");
- return -1;
- }
-
- tmp = (struct elog_user_data_section *)buffer;
- tmp->tag = tag;
- tmp->size = size + sizeof(struct elog_user_data_section) - 1;
- memcpy(tmp->data_dump, data, size);
-
- buf->user_section_size += tmp->size;
- buf->user_section_count++;
- return 0;
-}
-
-/* Pre-allocate memory for writing error log to FSP */
-static int init_elog_write_free_list(uint32_t num_entries)
-{
- struct errorlog *entry;
- int i;
-
- entry = zalloc(sizeof(struct errorlog) * num_entries);
- if (!entry)
- goto out_err;
-
- for (i = 0; i < num_entries; ++i) {
- list_add_tail(&elog_write_free, &entry->link);
- entry++;
- }
-
- /* Pre-allocate one single buffer for PANIC path */
- panic_write_buffer = zalloc(sizeof(struct errorlog));
- if (!panic_write_buffer)
- goto out_err;
-
- return 0;
-
-out_err:
- return -ENOMEM;
-}
-
static void elog_append_write_to_host(struct errorlog *buf)
{
@@ -596,11 +413,7 @@ void fsp_elog_write_init(void)
fsp_tce_map(PSI_DMA_ELOG_WR_TO_HOST_BUF, elog_write_to_host_buffer,
PSI_DMA_ELOG_WR_TO_HOST_BUF_SZ);
- /* pre-allocate memory for 64 records */
- if (init_elog_write_free_list(ELOG_WRITE_MAX_RECORD)) {
- prerror("ELOG: Cannot allocate WRITE buffers to log errors!\n");
- return;
- }
+ elog_init();
/* Add a poller */
opal_add_poller(elog_timeout_poll, NULL);
diff --git a/include/errorlog.h b/include/errorlog.h
index 0032eff..622a51a 100644
--- a/include/errorlog.h
+++ b/include/errorlog.h
@@ -97,6 +97,10 @@
/* Max user dump size is 14K */
#define OPAL_LOG_MAX_DUMP 14336
+/* Origin of error, elog_origin */
+#define ORG_SAPPHIRE 1
+#define ORG_POWERNV 2
+
/* Multiple user data sections */
struct __attribute__((__packed__))elog_user_data_section {
uint32_t tag;
@@ -132,4 +136,41 @@ struct __attribute__((__packed__)) errorlog {
char user_data_dump[OPAL_LOG_MAX_DUMP];
struct list_node link;
};
+
+/* Static description of an error/event type: reason code, component,
+ * subsystem, severity and an optional call-out hook that appends
+ * extra dump sections when the error is logged via log_error(). */
+struct opal_err_info {
+ uint32_t reason_code;
+ uint8_t err_type;
+ uint16_t cmp_id;
+ uint8_t subsystem;
+ uint8_t sev;
+ uint8_t event_subtype;
+ void (*call_out)(struct errorlog *buf, void *data, uint16_t size);
+};
+
+/* Define a file-scope struct opal_err_info named err_<reason> */
+#define DEFINE_LOG_ENTRY(reason, type, id, subsys, \
+severity, subtype, callout_func) struct opal_err_info err_##reason = \
+{ .reason_code = reason, .err_type = type, .cmp_id = id, \
+.subsystem = subsys, .sev = severity, .event_subtype = subtype, \
+.call_out = callout_func }
+
+/* This is wrapper around the error log function, which creates
+ * and commits the error to FSP.
+ * Used for simple error logging
+ */
+void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+void log_error(struct opal_err_info *e_info, void *data, uint16_t size,
+ const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
+
+/* Append a tagged user-data section to an errorlog under construction */
+int opal_elog_update_user_dump(struct errorlog *buf, unsigned char *data,
+ uint32_t tag, uint16_t size);
+
+/* Allocate and pre-fill an errorlog from @e_info; NULL if no buffer */
+struct errorlog *opal_elog_create(struct opal_err_info *e_info);
+
+/* Called by the backend after an error has been logged by the
+ * backend. If the error could not be logged successfully success is
+ * set to false. */
+void opal_elog_complete(struct errorlog *elog, bool success);
+
+/* Pre-allocate the errorlog record pool; called from backend init */
+int elog_init(void);
+
#endif /* __ERRORLOG_H */
diff --git a/include/fsp-elog.h b/include/fsp-elog.h
index abf3972..df8b2f1 100644
--- a/include/fsp-elog.h
+++ b/include/fsp-elog.h
@@ -193,16 +193,6 @@ enum opal_reasoncode {
= OPAL_FP | 0x10,
};
-struct opal_err_info {
- uint32_t reason_code;
- uint8_t err_type;
- uint16_t cmp_id;
- uint8_t subsystem;
- uint8_t sev;
- uint8_t event_subtype;
- void (*call_out)(struct errorlog *buf, void *data, uint16_t size);
-};
-
#define DEFINE_LOG_ENTRY(reason, type, id, subsys, \
severity, subtype, callout_func) struct opal_err_info err_##reason = \
{ .reason_code = reason, .err_type = type, .cmp_id = id, \
diff --git a/include/pel.h b/include/pel.h
index 6c06de8..3acc3d1 100644
--- a/include/pel.h
+++ b/include/pel.h
@@ -16,6 +16,8 @@
#ifndef __PEL_H
#define __PEL_H
+#include <errorlog.h>
+
/* Data Structures for PEL data. */
#define PRIVATE_HEADER_SECTION_SIZE 48
diff --git a/include/pool.h b/include/pool.h
new file mode 100644
index 0000000..89ef967
--- /dev/null
+++ b/include/pool.h
@@ -0,0 +1,21 @@
+#ifndef __POOL_H
+#define __POOL_H
+
+#include <ccan/list/list.h>
+#include <stddef.h>
+
+/* A fixed-size pool of pre-allocated objects.  Free objects are kept
+ * on an intrusive list (the list_node is stored inside the object
+ * itself, so obj_size is at least sizeof(struct list_node)).  The
+ * last 'reserved' objects are only handed out to POOL_HIGH requests.
+ * The pool does no locking of its own; callers must serialise access.
+ */
+struct pool {
+ void *buf;
+ size_t obj_size;
+ struct list_head free_list;
+ int free_count;
+ int reserved;
+};
+
+/* Allocation priority: POOL_NORMAL may not dip into the reserve,
+ * POOL_HIGH may. */
+enum pool_priority {POOL_NORMAL, POOL_HIGH};
+
+void* pool_get(struct pool *pool, enum pool_priority priority);
+void pool_free_object(struct pool *pool, void *obj);
+int pool_init(struct pool *pool, size_t obj_size, int count, int reserved);
+
+#endif /* __POOL_H */