author		Gavin Shan <gwshan@linux.vnet.ibm.com>		2015-07-17 09:12:31 +1000
committer	Stewart Smith <stewart@linux.vnet.ibm.com>	2015-08-14 14:00:17 +1000
commit		9af2d0b1bd46cb35026f7eb9fa3b1e4d8e131534 (patch)
tree		65f26a7ae8088d146893193624ac2660ee0a4410 /hw
parent		affcb6471add85007e4f284f0f79f65722488f7f (diff)
hw/phb3: Change reserved PE to 255
Currently, we have reserved PE#0, to which all RIDs are mapped prior to
any PE assignment request from the kernel. The last M64 BAR is configured
in shared mode, so we have to cut off the first M64 segment, which
corresponds to reserved PE#0 in the kernel. If the first BAR (for
example, a PF's IOV BAR) requires huge alignment in the kernel, we have
to waste a huge amount of M64 space to accommodate that alignment. If we
reserve PE#255 instead, the waste of M64 space is avoided.

Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
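A back-of-the-envelope illustration of the waste (not part of the patch): with 256 PEs and the last M64 BAR in shared mode, each PE owns one segment of the window. The window size and BAR alignment below are assumptions for the arithmetic only, not values from the PHB3 specification.

    /* Illustrative only: why reserving PE#0 wastes M64 space. */
    #include <stdio.h>
    #include <stdint.h>

    #define NUM_PES     256
    #define M64_WINDOW  (64ULL << 30)            /* assume a 64GB shared M64 BAR */
    #define SEG_SIZE    (M64_WINDOW / NUM_PES)   /* one segment per PE: 256MB */

    int main(void)
    {
        uint64_t align = 8ULL << 30;    /* assume an 8GB-aligned PF IOV BAR */
        uint64_t start, first_bar;

        /* Reserved PE#0: segment 0 is cut off, so allocation starts at
         * SEG_SIZE and must then be rounded up to the BAR's alignment. */
        start = SEG_SIZE;
        first_bar = (start + align - 1) & ~(align - 1);
        printf("reserved PE#0:   %llu MB skipped before the first BAR\n",
               (unsigned long long)((first_bar - start) >> 20));

        /* Reserved PE#255: the *last* segment is cut off instead, so the
         * first BAR can sit at offset 0, satisfying any alignment. */
        printf("reserved PE#255: 0 MB skipped\n");
        return 0;
    }

With these numbers, reserving PE#0 forces the kernel to skip almost the whole 8GB alignment span, while reserving PE#255 costs only the final 256MB segment at the top of the window.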
Diffstat (limited to 'hw')
-rw-r--r--	hw/phb3.c	21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/hw/phb3.c b/hw/phb3.c
index cdc20e8..6bf73bf 100644
--- a/hw/phb3.c
+++ b/hw/phb3.c
@@ -559,7 +559,8 @@ static void phb3_init_ioda_cache(struct phb3 *p)
 	 * ever let a live FF RTT even temporarily when resetting
 	 * for EEH etc... (HW278969).
 	 */
-	memset(p->rte_cache, 0x00, RTT_TABLE_SIZE);
+	for (i = 0; i < ARRAY_SIZE(p->rte_cache); i++)
+		p->rte_cache[i] = PHB3_RESERVED_PE_NUM;
 	memset(p->peltv_cache, 0x0, sizeof(p->peltv_cache));
 
 	/* Disable all LSI */
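Why a loop rather than another memset(): the RTT entries are 16-bit PE numbers (note the uint16_t cast in the phb3_allocate_tables hunk below), and memset() replicates a single byte, so it can produce 0x0000 per entry but not 0x00FF. A minimal sketch, with an illustrative table size:

    #include <stdint.h>

    #define PHB3_RESERVED_PE_NUM  255
    #define RTT_TABLE_ENTRIES     (64 * 1024)   /* illustrative size */

    static uint16_t rte_cache[RTT_TABLE_ENTRIES];

    static void init_rte_cache(void)
    {
        uint32_t i;

        /* memset(rte_cache, 0xff, sizeof(rte_cache)) would set every
         * entry to 0xFFFF, not 255 (0x00FF), so each 16-bit entry has
         * to be assigned individually. */
        for (i = 0; i < RTT_TABLE_ENTRIES; i++)
            rte_cache[i] = PHB3_RESERVED_PE_NUM;
    }

    int main(void)
    {
        init_rte_cache();
        return 0;
    }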
@@ -1792,7 +1793,8 @@ static int64_t phb3_set_pe(struct phb *phb,
 			for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++)
 				p->rte_cache[idx] = pe_num;
 		} else {
-			memset(p->rte_cache, 0, RTT_TABLE_SIZE);
+			for (idx = 0; idx < ARRAY_SIZE(p->rte_cache); idx++)
+				p->rte_cache[idx] = PHB3_RESERVED_PE_NUM;
 		}
 		memcpy((void *)p->tbl_rtt, p->rte_cache, RTT_TABLE_SIZE);
 	} else {
@@ -1800,7 +1802,10 @@ static int64_t phb3_set_pe(struct phb *phb,
 		for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++, rte++) {
 			if ((idx & mask) != val)
 				continue;
-			p->rte_cache[idx] = (action ? pe_num : 0);
+			if (action == OPAL_MAP_PE)
+				p->rte_cache[idx] = pe_num;
+			else
+				p->rte_cache[idx] = PHB3_RESERVED_PE_NUM;
 			*rte = p->rte_cache[idx];
 		}
 	}
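In isolation, the rule this hunk implements: entries whose RID matches the mask/value filter point at the target PE on OPAL_MAP_PE, and revert to the reserved PE (rather than 0, which is now an ordinary assignable PE) on any other action. A simplified sketch of the cache-side loop — the helper name is invented, the OPAL action values are assumed, and the write-through to the hardware table is omitted:

    #include <stdint.h>

    #define PHB3_RESERVED_PE_NUM  255
    #define OPAL_MAP_PE           1             /* assumed OPAL action values */
    #define OPAL_UNMAP_PE         0
    #define RTT_TABLE_ENTRIES     (64 * 1024)   /* illustrative size */

    static void update_rtt(uint16_t *rte_cache, uint32_t mask, uint32_t val,
                           uint8_t action, uint16_t pe_num)
    {
        uint32_t idx;

        for (idx = 0; idx < RTT_TABLE_ENTRIES; idx++) {
            /* The index is the RID; skip RIDs outside the filter */
            if ((idx & mask) != val)
                continue;
            /* Map to the target PE, or back to the reserved PE
             * (OPAL_UNMAP_PE) -- never back to 0. */
            rte_cache[idx] = (action == OPAL_MAP_PE)
                    ? pe_num : PHB3_RESERVED_PE_NUM;
        }
    }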
@@ -4026,6 +4031,9 @@ static void phb3_init_hw(struct phb3 *p, bool first_init)
 
 static void phb3_allocate_tables(struct phb3 *p)
 {
+	uint16_t *rte;
+	uint32_t i;
+
 	/* XXX Our current memalign implementation sucks,
 	 *
 	 * It will do the job, however it doesn't support freeing
@@ -4034,7 +4042,9 @@ static void phb3_allocate_tables(struct phb3 *p)
 	 */
 	p->tbl_rtt = (uint64_t)local_alloc(p->chip_id, RTT_TABLE_SIZE, RTT_TABLE_SIZE);
 	assert(p->tbl_rtt);
-	memset((void *)p->tbl_rtt, 0, RTT_TABLE_SIZE);
+	rte = (uint16_t *)(p->tbl_rtt);
+	for (i = 0; i < RTT_TABLE_ENTRIES; i++, rte++)
+		*rte = PHB3_RESERVED_PE_NUM;
 
 	p->tbl_peltv = (uint64_t)local_alloc(p->chip_id, PELTV_TABLE_SIZE, PELTV_TABLE_SIZE);
 	assert(p->tbl_peltv);
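The allocation passes RTT_TABLE_SIZE as both the size and the alignment argument of local_alloc(), so the hardware table comes back naturally aligned. A rough userspace analogue, using C11 aligned_alloc() as a stand-in for skiboot's allocator and an illustrative table size:

    #include <stdint.h>
    #include <stdlib.h>
    #include <assert.h>

    #define PHB3_RESERVED_PE_NUM  255
    #define RTT_TABLE_SIZE        0x20000   /* illustrative: 64K RIDs x 2 bytes */
    #define RTT_TABLE_ENTRIES     (RTT_TABLE_SIZE / 2)

    int main(void)
    {
        uint16_t *rte;
        uint32_t i;

        /* Stand-in for local_alloc(chip, size, align): the size doubles
         * as the alignment, so the table ends up naturally aligned. */
        rte = aligned_alloc(RTT_TABLE_SIZE, RTT_TABLE_SIZE);
        assert(rte);

        /* Every RID starts out routed to the reserved PE, matching the
         * ioda cache so a straight memcpy keeps the two in sync. */
        for (i = 0; i < RTT_TABLE_ENTRIES; i++)
            rte[i] = PHB3_RESERVED_PE_NUM;

        free(rte);
        return 0;
    }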
@@ -4100,7 +4110,8 @@ static void phb3_add_properties(struct phb3 *p)
 	dt_add_property(np, "ibm,opal-single-pe", NULL, 0);
 	//dt_add_property_cells(np, "ibm,opal-msi-ports", 2048);
 	dt_add_property_cells(np, "ibm,opal-num-pes", 256);
-	dt_add_property_cells(np, "ibm,opal-reserved-pe", 0);
+	dt_add_property_cells(np, "ibm,opal-reserved-pe",
+			      PHB3_RESERVED_PE_NUM);
 	dt_add_property_cells(np, "ibm,opal-msi-ranges",
 			      p->base_msi, PHB3_MSI_IRQ_COUNT);
 	tkill = reg + PHB_TCE_KILL;
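On the consumer side, the kernel reads this property instead of hard-coding PE#0. A hypothetical Linux-side sketch (the helper is invented; only of_property_read_u32() is a real kernel API):

    #include <linux/of.h>

    /* Hypothetical consumer-side helper (not skiboot code): read the
     * reserved PE the firmware advertises. */
    static u32 phb_reserved_pe(struct device_node *np)
    {
        u32 reserved_pe = 0;

        /* of_property_read_u32() leaves reserved_pe untouched if the
         * property is missing, so firmware that predates the property
         * still gets the old PE#0 default. */
        of_property_read_u32(np, "ibm,opal-reserved-pe", &reserved_pe);
        return reserved_pe;
    }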