/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <anup.patel@wdc.com>
*/

#include <sbi/riscv_locks.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

#define DEFAULT_SCRATCH_ALLOC_ALIGN __SIZEOF_POINTER__

u32 sbi_scratch_hart_count;
u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS] = {
	[0 ... SBI_HARTMASK_MAX_BITS - 1] = -1U
};
struct sbi_scratch *hartindex_to_scratch_table[SBI_HARTMASK_MAX_BITS];

static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;

/*
 * Get the allocation alignment size.
 * Returns the platform's riscv,cbom-block-size when available,
 * otherwise DEFAULT_SCRATCH_ALLOC_ALIGN.
 */
static unsigned long sbi_get_scratch_alloc_align(void)
{
const struct sbi_platform *plat = sbi_platform_thishart_ptr();
if (!plat || !plat->cbom_block_size)
return DEFAULT_SCRATCH_ALLOC_ALIGN;
return plat->cbom_block_size;
}

u32 sbi_hartid_to_hartindex(u32 hartid)
{
sbi_for_each_hartindex(i)
if (hartindex_to_hartid_table[i] == hartid)
return i;
return -1U;
}
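
/*
 * Illustrative sketch, not part of the original file: combining
 * sbi_hartid_to_hartindex() with the sbi_hartindex_to_scratch() helper used
 * later in this file to reach another hart's scratch area. The helper name
 * scratch_of_hartid is hypothetical; the #if 0 keeps it out of the build.
 */
#if 0
static struct sbi_scratch *scratch_of_hartid(u32 hartid)
{
	u32 i = sbi_hartid_to_hartindex(hartid);

	/* -1U means the hartid is not managed by this OpenSBI instance */
	if (i == -1U)
		return NULL;

	return sbi_hartindex_to_scratch(i);
}
#endif
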
typedef struct sbi_scratch *(*hartid2scratch)(ulong hartid, ulong hartindex);

int sbi_scratch_init(struct sbi_scratch *scratch)
{
u32 h, hart_count;
const struct sbi_platform *plat = sbi_platform_ptr(scratch);
hart_count = plat->hart_count;
if (hart_count > SBI_HARTMASK_MAX_BITS)
hart_count = SBI_HARTMASK_MAX_BITS;
sbi_scratch_hart_count = hart_count;
sbi_for_each_hartindex(i) {
h = (plat->hart_index2id) ? plat->hart_index2id[i] : i;
hartindex_to_hartid_table[i] = h;
hartindex_to_scratch_table[i] =
((hartid2scratch)scratch->hartid_to_scratch)(h, i);
}
return 0;
}

unsigned long sbi_scratch_alloc_offset(unsigned long size)
{
void *ptr;
unsigned long ret = 0;
struct sbi_scratch *rscratch;
unsigned long scratch_alloc_align = 0;
	/*
	 * We have a simple bump allocator which never expects anything
	 * to be freed, so it keeps incrementing the next allocation
	 * offset until it runs out of space.
	 *
	 * In the future, a more sophisticated allocator could allow us
	 * to reclaim freed space.
	 */
if (!size)
return 0;
scratch_alloc_align = sbi_get_scratch_alloc_align();
	/*
	 * Align the allocation to the cache block size (a power of two)
	 * so that atomic variables from different allocations never share
	 * a cache line, which can cause livelocks on certain platforms.
	 */
size += scratch_alloc_align - 1;
size &= ~(scratch_alloc_align - 1);
spin_lock(&extra_lock);
if (SBI_SCRATCH_SIZE < (extra_offset + size))
goto done;
ret = extra_offset;
extra_offset += size;
done:
spin_unlock(&extra_lock);
if (ret) {
sbi_for_each_hartindex(i) {
rscratch = sbi_hartindex_to_scratch(i);
if (!rscratch)
continue;
ptr = sbi_scratch_offset_ptr(rscratch, ret);
sbi_memset(ptr, 0, size);
}
}
return ret;
}
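
/*
 * Illustrative sketch, not part of the original file: the usual pattern is
 * to reserve per-hart space once during cold boot and then dereference it
 * through sbi_scratch_offset_ptr() (used above). The names my_data_offset
 * and my_driver_init are hypothetical, SBI_ENOMEM comes from
 * <sbi/sbi_error.h>, and the #if 0 keeps the sketch out of the build.
 */
#if 0
static unsigned long my_data_offset;

static int my_driver_init(struct sbi_scratch *scratch)
{
	unsigned long *my_data;

	/* Reserve sizeof(unsigned long) bytes in every hart's scratch area */
	my_data_offset = sbi_scratch_alloc_offset(sizeof(unsigned long));
	if (!my_data_offset)
		return SBI_ENOMEM;

	/* Get this hart's copy of the allocation and initialize it */
	my_data = sbi_scratch_offset_ptr(scratch, my_data_offset);
	*my_data = 0;

	return 0;
}
#endif
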
void sbi_scratch_free_offset(unsigned long offset)
{
if ((offset < SBI_SCRATCH_EXTRA_SPACE_OFFSET) ||
(SBI_SCRATCH_SIZE <= offset))
return;
	/*
	 * We don't actually free anything because this is a simple
	 * bump allocator (see sbi_scratch_alloc_offset() above).
	 */
}

unsigned long sbi_scratch_used_space(void)
{
unsigned long ret = 0;
spin_lock(&extra_lock);
ret = extra_offset;
spin_unlock(&extra_lock);
return ret;
}