1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
|
/*
* QEMU PowerPC SPI model
*
* Copyright (c) 2024, IBM Corporation.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ssi/pnv_spi.h"
#include "hw/ssi/pnv_spi_regs.h"
#include "hw/ssi/ssi.h"
#include <libfdt.h>
#include "hw/irq.h"
#include "trace.h"
#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
/*
 * Macro from include/hw/ppc/fdt.h
 * fdt.h cannot be included here as it contain ppc target specific dependency.
 * Evaluates exp (a libfdt call); on a negative return code it logs the
 * failing expression and its fdt_strerror() text, then exits.
 */
#define _FDT(exp) \
do { \
int _ret = (exp); \
if (_ret < 0) { \
qemu_log_mask(LOG_GUEST_ERROR, \
"error creating device tree: %s: %s", \
#exp, fdt_strerror(_ret)); \
exit(1); \
} \
} while (0)
/* PnvXferBuffer: growable byte buffer holding one SPI frame payload */
typedef struct PnvXferBuffer {
    uint32_t len;   /* current size in bytes of the data buffer */
    uint8_t *data;  /* heap storage, grown on demand by write_ptr() */
} PnvXferBuffer;
/* pnv_spi_xfer_buffer_methods */
static PnvXferBuffer *pnv_spi_xfer_buffer_new(void)
{
PnvXferBuffer *payload = g_malloc0(sizeof(*payload));
return payload;
}
/*
 * Release a transfer buffer and its backing storage.
 * Accepts NULL as a no-op, matching g_free() conventions; the previous
 * version dereferenced payload unconditionally and would crash on NULL.
 */
static void pnv_spi_xfer_buffer_free(PnvXferBuffer *payload)
{
    if (payload) {
        g_free(payload->data);
        g_free(payload);
    }
}
/*
 * Return a writable pointer to [offset, offset + length) inside the
 * payload, growing the backing store first when the requested window
 * extends past the current end of the buffer.
 */
static uint8_t *pnv_spi_xfer_buffer_write_ptr(PnvXferBuffer *payload,
                uint32_t offset, uint32_t length)
{
    uint32_t required = offset + length;

    if (required > payload->len) {
        payload->data = g_realloc(payload->data, required);
        payload->len = required;
    }
    return payload->data + offset;
}
/*
 * Compare the low 16 bits of the RDR against the memory-mapping match
 * value. Per the spec, mask bits that are 0 participate in the compare
 * and mask bits that are 1 are ignored.
 */
static bool does_rdr_match(PnvSpi *s)
{
    uint16_t mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]);
    uint16_t match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]);
    uint16_t rdr_lo = GETFIELD(PPC_BITMASK(48, 63),
                               s->regs[SPI_RCV_DATA_REG]);

    return (~mask & match_val) == (~mask & rdr_lo);
}
/*
 * Fetch one byte from the TDR (transmit data register).
 * Byte 0 occupies bits 63:56 of SPI_XMIT_DATA_REG; each subsequent byte
 * sits 8 bits lower. An out-of-range offset is a guest error: log it and
 * return 0xFF filler.
 */
static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
{
    /* Offset must be an index between 0 and PNV_SPI_REG_SIZE - 1 */
    if (offset >= PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
                      "from TDR\n", offset);
        return 0xff;
    }
    return (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
}
/*
 * Shift nr_bytes of received frame data from read_buf into the RDR.
 * When ECC is active (ecc_count != 0) every (PNV_SPI_REG_SIZE +
 * ecc_count)-th byte shifted in is the ECC byte: it is dropped and the
 * running count restarts. Returns the updated running shift-in count so
 * the caller can carry it across the N1 and N2 frame segments.
 */
static uint8_t read_from_frame(PnvSpi *s, uint8_t *read_buf, uint8_t nr_bytes,
                uint8_t ecc_count, uint8_t shift_in_count)
{
    for (int i = 0; i < nr_bytes; i++) {
        shift_in_count++;
        if ((ecc_count != 0) &&
            (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
            /* ECC byte position: discard and restart the count */
            shift_in_count = 0;
        } else {
            uint8_t byte = read_buf[i];
            trace_pnv_spi_shift_rx(byte, i);
            s->regs[SPI_RCV_DATA_REG] =
                (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
        }
    }
    return shift_in_count;
}
/*
 * Process a response payload coming back from the responder: shift the
 * relevant bytes into the RDR (stripping ECC bytes when ECC is enabled)
 * and update the RDR_full / RDR_overrun status bits.
 * NOTE(review): the "bits" parameter is not referenced anywhere in this
 * body — confirm whether it can be dropped at the call site.
 */
static void spi_response(PnvSpi *s, int bits, PnvXferBuffer *rsp_payload)
{
    uint8_t ecc_count;
    uint8_t shift_in_count;
    /*
     * Processing here must handle:
     * - Which bytes in the payload we should move to the RDR
     * - Explicit mode counter configuration settings
     * - RDR full and RDR overrun status
     */
    /*
     * First check that the response payload is the exact same
     * number of bytes as the request payload was
     */
    if (rsp_payload->len != (s->N1_bytes + s->N2_bytes)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
                       "bytes, expected %d, got %d\n",
                       (s->N1_bytes + s->N2_bytes), rsp_payload->len);
    } else {
        uint8_t ecc_control;
        trace_pnv_spi_rx_received(rsp_payload->len);
        trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                        s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
        /*
         * Adding an ECC count let's us know when we have found a payload byte
         * that was shifted in but cannot be loaded into RDR. Bits 29-30 of
         * clock_config_reset_control register equal to either 0b00 or 0b10
         * indicate that we are taking in data with ECC and either applying
         * the ECC or discarding it.
         */
        ecc_count = 0;
        ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
        if (ecc_control == 0 || ecc_control == 2) {
            ecc_count = 1;
        }
        /*
         * Use the N1_rx and N2_rx counts to control shifting data from the
         * payload into the RDR. Keep an overall count of the number of bytes
         * shifted into RDR so we can discard every 9th byte when ECC is
         * enabled.
         */
        shift_in_count = 0;
        /* Handle the N1 portion of the frame first */
        if (s->N1_rx != 0) {
            trace_pnv_spi_rx_read_N1frame();
            shift_in_count = read_from_frame(s, &rsp_payload->data[0],
                            s->N1_bytes, ecc_count, shift_in_count);
        }
        /* Handle the N2 portion of the frame */
        if (s->N2_rx != 0) {
            /* shift_in_count carries over so ECC stripping spans segments */
            trace_pnv_spi_rx_read_N2frame();
            shift_in_count = read_from_frame(s,
                            &rsp_payload->data[s->N1_bytes], s->N2_bytes,
                            ecc_count, shift_in_count);
        }
        if ((s->N1_rx + s->N2_rx) > 0) {
            /*
             * Data was received so handle RDR status.
             * It is easier to handle RDR_full and RDR_overrun status here
             * since the RDR register's shift_byte_in method is called
             * multiple times in a row. Controlling RDR status is done here
             * instead of in the RDR scoped methods for that reason.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                /*
                 * Data was shifted into the RDR before having been read
                 * causing previous data to have been overrun.
                 */
                s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
            } else {
                /*
                 * Set status to indicate that the received data register is
                 * full. This flag is only cleared once the RDR is unloaded.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
            }
        }
    } /* end of else */
} /* end of spi_response() */
/*
 * Clock a fully assembled request payload out on the SSI bus,
 * transfer_len bytes per ssi_transfer() word, collecting the response
 * bytes into a temporary buffer that is then handed to spi_response().
 */
static void transfer(PnvSpi *s, PnvXferBuffer *payload)
{
    uint32_t tx;
    uint32_t rx;
    /*
     * pnv_spi_xfer_buffer_new() allocates with g_malloc0, which aborts on
     * OOM and never returns NULL, so the old "if (!rsp_payload) return;"
     * check was unreachable and has been removed.
     */
    PnvXferBuffer *rsp_payload = pnv_spi_xfer_buffer_new();

    for (int offset = 0; offset < payload->len; offset += s->transfer_len) {
        tx = 0;
        /* Pack up to transfer_len bytes MSB-first, zero-padding past end */
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                tx <<= 8;
            } else {
                tx = (tx << 8) | payload->data[offset + i];
            }
        }
        rx = ssi_transfer(s->ssi_bus, tx);
        /* Unpack the response word, dropping the pad positions */
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload->len) {
                break;
            }
            *(pnv_spi_xfer_buffer_write_ptr(rsp_payload, rsp_payload->len, 1)) =
                    (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
        }
    }
    spi_response(s, s->N1_bits, rsp_payload);
    pnv_spi_xfer_buffer_free(rsp_payload);
}
/* Return the sequencer operation index latched in the status register. */
static inline uint8_t get_seq_index(PnvSpi *s)
{
    uint8_t index = GETFIELD(SPI_STS_SEQ_INDEX, s->status);
    return index;
}
/*
 * Advance the sequencer to the next operation: bump the index in the
 * status register and flag the sequencer FSM as INDEX_INCREMENT.
 */
static inline void next_sequencer_fsm(PnvSpi *s)
{
    s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
                         get_seq_index(s) + 1);
    s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
}
/*
* Calculate the N1 counters based on passed in opcode and
* internal register values.
* The method assumes that the opcode is a Shift_N1 opcode
* and doesn't test it.
* The counters returned are:
* N1 bits: Number of bits in the payload data that are significant
* to the responder.
* N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
* N1_tx: Total number of bytes taken from TDR for N1
* N1_rx: Total number of bytes taken from the payload for N1
*/
static void calculate_N1(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N1 opcode form: 0x3M
     * Implicit mode:
     * If M != 0 the shift count is M bytes and M is the number of tx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields. Note that we only check for forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M == 0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N1 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
        s->N1_bytes = (s->N1_bits + 7) / 8; /* round bits up to whole bytes */
        s->N1_tx = 0;
        s->N1_rx = 0;
        /* If tx count control for N1 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_tx = s->N1_bytes;
        }
        /* If rx count control for N1 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_rx = s->N1_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N1_bits = s->N1_bytes * 8;
        /*
         * Assume that we are going to transmit the count
         * (pure Implicit only)
         */
        s->N1_tx = s->N1_bytes;
        s->N1_rx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate transmit then reset the tx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N1_tx = 0;
            }
            /* If rx count control for N1 is set, load the rx value */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N1_rx = s->N1_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N1 that is equal to the known size
     * of the shift register, 64 bits or 72 bits if ECC is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     * NOTE(review): the sequencer FSM error bit mentioned above is not
     * actually set in either branch below — confirm against the HW spec.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        /* ECC enabled: shift register holds one extra (ECC) byte */
        if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
                          "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
                          s->N1_bytes, s->N1_bits);
            s->N1_bytes = PNV_SPI_REG_SIZE + 1;
            s->N1_bits = s->N1_bytes * 8;
        }
    } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
                      "bytes = 0x%x, bits = 0x%x\n",
                      s->N1_bytes, s->N1_bits);
        s->N1_bytes = PNV_SPI_REG_SIZE;
        s->N1_bits = s->N1_bytes * 8;
    }
} /* end of calculate_N1 */
/*
 * Shift_N1 operation handler method
 *
 * Builds the N1 segment of the frame payload from the TDR (or 0xFF
 * filler), transmits it on its own when send_n1_alone is set, and
 * returns true when the sequencer must stop and wait (TDR empty on a TX,
 * RDR full on an RX, or a pending N2 counter-reload pacing write).
 */
static bool operation_shiftn1(PnvSpi *s, uint8_t opcode,
                       PnvXferBuffer **payload, bool send_n1_alone)
{
    uint8_t n1_count;
    bool stop = false;
    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N1 counters to build the N1 portion of the
     * transmit payload.
     * We only care about transmit at this time since the request payload
     * only represents data going out on the controller output line.
     * Leave mode specific considerations in the calculate function since
     * all we really care about are counters that tell use exactly how
     * many bytes are in the payload and how many of those bytes to
     * include from the TDR into the payload.
     */
    calculate_N1(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * Zero out the N2 counters here in case there is no N2 operation following
     * the N1 operation in the sequencer. This keeps leftover N2 information
     * from interfering with spi_response logic.
     */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    /*
     * N1_bytes is the overall size of the N1 portion of the frame regardless of
     * whether N1 is used for tx, rx or both. Loop over the size to build a
     * payload that is N1_bytes long.
     * N1_tx is the count of bytes to take from the TDR and "shift" into the
     * frame which means append those bytes to the payload for the N1 portion
     * of the frame.
     * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
     * the frame until the overall N1 count is reached.
     */
    n1_count = 0;
    while (n1_count < s->N1_bytes) {
        /*
         * Assuming that if N1_tx is not equal to 0 then it is the same as
         * N1_bytes.
         */
        if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {
            if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
                /*
                 * Note that we are only appending to the payload IF the TDR
                 * is full otherwise we don't touch the payload because we are
                 * going to NOT send the payload and instead tell the sequencer
                 * that called us to stop and wait for a TDR write so we have
                 * data to load into the payload.
                 */
                uint8_t n1_byte = 0x00;
                n1_byte = get_from_offset(s, n1_count);
                trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1)) =
                        n1_byte;
            } else {
                /*
                 * We hit a shift_n1 opcode TX but the TDR is empty, tell the
                 * sequencer to stop and break this loop.
                 */
                trace_pnv_spi_sequencer_stop_requested("Shift N1"
                                "set for transmit but TDR is empty");
                stop = true;
                break;
            }
        } else {
            /*
             * Cases here:
             * - we are receiving during the N1 frame segment and the RDR
             *   is full so we need to stop until the RDR is read
             * - we are transmitting and we don't care about RDR status
             *   since we won't be loading RDR during the frame segment.
             * - we are receiving and the RDR is empty so we allow the operation
             *   to proceed.
             */
            if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL,
                                             s->status) == 1)) {
                trace_pnv_spi_sequencer_stop_requested("shift N1"
                                "set for receive but RDR is full");
                stop = true;
                break;
            } else {
                trace_pnv_spi_tx_append_FF("n1_byte");
                *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                        = 0xff;
            }
        }
        n1_count++;
    } /* end of while */
    /*
     * If we are not stopping due to an empty TDR and we are doing an N1 TX
     * and the TDR is full we need to clear the TDR_full status.
     * Do this here instead of up in the loop above so we don't log the message
     * in every loop iteration.
     * Ignore the send_n1_alone flag, all that does is defer the TX until the N2
     * operation, which was found immediately after the current opcode. The TDR
     * was unloaded and will be shifted so we have to clear the TDR_full status.
     */
    if (!stop && (s->N1_tx != 0) &&
        (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
    }
    /*
     * There are other reasons why the shifter would stop, such as a TDR empty
     * or RDR full condition with N1 set to receive. If we haven't stopped due
     * to either one of those conditions then check if the send_n1_alone flag is
     * equal to False, indicating the next opcode is an N2 operation, AND if
     * the N2 counter reload switch (bit 0 of the N2 count control field) is
     * set. This condition requires a pacing write to "kick" off the N2
     * shift which includes the N1 shift as well when send_n1_alone is False.
     */
    if (!stop && !send_n1_alone &&
        (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
        trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
                        "active, stop N1 shift, TDR_underrun set to 1");
        stop = true;
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
    }
    /*
     * If send_n1_alone is set AND we have a full TDR then this is the first and
     * last payload to send and we don't have an N2 frame segment to add to the
     * payload.
     */
    if (send_n1_alone && !stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N1 frame", (*payload)->len);
        transfer(s, *payload);
        /*
         * The N1 frame shift is complete so reset the N1 counters.
         * (The previous code re-zeroed the N2 counters here — a no-op,
         * since they were already cleared above — and left the N1
         * counters stale, contradicting this comment and the matching
         * reset of both counter sets in operation_shiftn2().)
         */
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /* end of operation_shiftn1() */
/*
* Calculate the N2 counters based on passed in opcode and
* internal register values.
* The method assumes that the opcode is a Shift_N2 opcode
* and doesn't test it.
* The counters returned are:
* N2 bits: Number of bits in the payload data that are significant
* to the responder.
* N2_bytes: Total count of payload bytes for the N2 frame.
* N2_tx: Total number of bytes taken from TDR for N2
* N2_rx: Total number of bytes taken from the payload for N2
*/
static void calculate_N2(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N2 opcode form: 0x4M
     * Implicit mode:
     * If M!=0 the shift count is M bytes and M is the number of rx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields. Note that we only check for Forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M==0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N2 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
        s->N2_bytes = (s->N2_bits + 7) / 8; /* round bits up to whole bytes */
        s->N2_tx = 0;
        s->N2_rx = 0;
        /* If tx count control for N2 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_tx = s->N2_bytes;
        }
        /* If rx count control for N2 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_rx = s->N2_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N2_bits = s->N2_bytes * 8;
        /* Assume that we are going to receive the count */
        s->N2_rx = s->N2_bytes;
        s->N2_tx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate a receive then reset the rx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N2_rx = 0;
            }
            /* If tx count control for N2 is set, load the tx value */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N2_tx = s->N2_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N2 that is equal to the
     * known size of the shift register, 64 bits or 72 bits if ECC
     * is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     * NOTE(review): unlike calculate_N1(), the over-size branches below
     * neither log nor set the FSM error bit — confirm whether that
     * asymmetry is intentional.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
            /* Unsupported N2 shift size when ECC enabled */
            s->N2_bytes = PNV_SPI_REG_SIZE + 1;
            s->N2_bits = s->N2_bytes * 8;
        }
    } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
        /* Unsupported N2 shift size */
        s->N2_bytes = PNV_SPI_REG_SIZE;
        s->N2_bits = s->N2_bytes * 8;
    }
} /* end of calculate_N2 */
/*
 * Shift_N2 operation handler method
 *
 * Builds the N2 segment of the frame payload, continuing the TDR offset
 * after any N1 tx bytes, then transmits the accumulated (N1 + N2)
 * payload and resets both counter sets. Returns true when the sequencer
 * must stop and wait (RDR full while N2 is set for receive).
 */
static bool operation_shiftn2(PnvSpi *s, uint8_t opcode,
                       PnvXferBuffer **payload)
{
    uint8_t n2_count;
    bool stop = false;
    /*
     * If there isn't a current payload left over from a stopped sequence
     * create a new one.
     */
    if (*payload == NULL) {
        *payload = pnv_spi_xfer_buffer_new();
    }
    /*
     * Use a combination of N2 counters to build the N2 portion of the
     * transmit payload.
     */
    calculate_N2(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * The only difference between this code and the code for shift N1 is
     * that this code has to account for the possible presence of N1 transmit
     * bytes already taken from the TDR.
     * If there are bytes to be transmitted for the N2 portion of the frame
     * and there are still bytes in TDR that have not been copied into the
     * TX data of the payload, this code will handle transmitting those
     * remaining bytes.
     * If for some reason the transmit count(s) add up to more than the size
     * of the TDR we will just append 0xFF to the transmit payload data until
     * the payload is N1 + N2 bytes long.
     */
    n2_count = 0;
    while (n2_count < s->N2_bytes) {
        /*
         * If the RDR is full and we need to RX just bail out, letting the
         * code continue will end up building the payload twice in the same
         * buffer since RDR full causes a sequence stop and restart.
         */
        if ((s->N2_rx != 0) &&
            (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
            trace_pnv_spi_sequencer_stop_requested("shift N2 set"
                            "for receive but RDR is full");
            stop = true;
            break;
        }
        if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) <
                                PNV_SPI_REG_SIZE)) {
            /* Always append data for the N2 segment if it is set for TX */
            uint8_t n2_byte = 0x00;
            n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
            trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = n2_byte;
        } else {
            /*
             * Regardless of whether or not N2 is set for TX or RX, we need
             * the number of bytes in the payload to match the overall length
             * of the operation.
             */
            trace_pnv_spi_tx_append_FF("n2_byte");
            *(pnv_spi_xfer_buffer_write_ptr(*payload, (*payload)->len, 1))
                    = 0xff;
        }
        n2_count++;
    } /* end of while */
    if (!stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N2 frame", (*payload)->len);
        transfer(s, *payload);
        /*
         * If we are doing an N2 TX and the TDR is full we need to clear the
         * TDR_full status. Do this here instead of up in the loop above so we
         * don't log the message in every loop iteration.
         */
        if ((s->N2_tx != 0) &&
            (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
            s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
        }
        /*
         * The N2 frame shift is complete so reset the N2 counters.
         * Reset the N1 counters also in case the frame was a combination of
         * N1 and N2 segments.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
        pnv_spi_xfer_buffer_free(*payload);
        *payload = NULL;
    }
    return stop;
} /* end of operation_shiftn2()*/
static void operation_sequencer(PnvSpi *s)
{
/*
* Loop through each sequencer operation ID and perform the requested
* operations.
* Flag for indicating if we should send the N1 frame or wait to combine
* it with a preceding N2 frame.
*/
bool send_n1_alone = true;
bool stop = false; /* Flag to stop the sequencer */
uint8_t opcode = 0;
uint8_t masked_opcode = 0;
/*
* PnvXferBuffer for containing the payload of the SPI frame.
* This is a static because there are cases where a sequence has to stop
* and wait for the target application to unload the RDR. If this occurs
* during a sequence where N1 is not sent alone and instead combined with
* N2 since the N1 tx length + the N2 tx length is less than the size of
* the TDR.
*/
static PnvXferBuffer *payload;
if (payload == NULL) {
payload = pnv_spi_xfer_buffer_new();
}
/*
* Clear the sequencer FSM error bit - general_SPI_status[3]
* before starting a sequence.
*/
s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
/*
* If the FSM is idle set the sequencer index to 0
* (new/restarted sequence)
*/
if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
}
/*
* There are only 8 possible operation IDs to iterate through though
* some operations may cause more than one frame to be sequenced.
*/
while (get_seq_index(s) < NUM_SEQ_OPS) {
opcode = s->seq_op[get_seq_index(s)];
/* Set sequencer state to decode */
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
/*
* Only the upper nibble of the operation ID is needed to know what
* kind of operation is requested.
*/
masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
switch (masked_opcode) {
/*
* Increment the operation index in each case instead of just
* once at the end in case an operation like the branch
* operation needs to change the index.
*/
case SEQ_OP_STOP:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
/* A stop operation in any position stops the sequencer */
trace_pnv_spi_sequencer_op("STOP", get_seq_index(s));
stop = true;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
s->loop_counter_1 = 0;
s->loop_counter_2 = 0;
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
break;
case SEQ_OP_SELECT_SLAVE:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("SELECT_SLAVE", get_seq_index(s));
/*
* This device currently only supports a single responder
* connection at position 0. De-selecting a responder is fine
* and expected at the end of a sequence but selecting any
* responder other than 0 should cause an error.
*/
s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
if (s->responder_select == 0) {
trace_pnv_spi_shifter_done();
qemu_set_irq(s->cs_line[0], 1);
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
(get_seq_index(s) + 1));
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
} else if (s->responder_select != 1) {
qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
"not supported, select = 0x%x\n",
s->responder_select);
trace_pnv_spi_sequencer_stop_requested("invalid "
"responder select");
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
stop = true;
} else {
/*
* Only allow an FSM_START state when a responder is
* selected
*/
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
trace_pnv_spi_shifter_stating();
qemu_set_irq(s->cs_line[0], 0);
/*
* A Shift_N2 operation is only valid after a Shift_N1
* according to the spec. The spec doesn't say if that means
* immediately after or just after at any point. We will track
* the occurrence of a Shift_N1 to enforce this requirement in
* the most generic way possible by assuming that the rule
* applies once a valid responder select has occurred.
*/
s->shift_n1_done = false;
next_sequencer_fsm(s);
}
break;
case SEQ_OP_SHIFT_N1:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("SHIFT_N1", get_seq_index(s));
/*
* Only allow a shift_n1 when the state is not IDLE or DONE.
* In either of those two cases the sequencer is not in a proper
* state to perform shift operations because the sequencer has:
* - processed a responder deselect (DONE)
* - processed a stop opcode (IDLE)
* - encountered an error (IDLE)
*/
if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
(GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
"shifter state = 0x%llx", GETFIELD(
SPI_STS_SHIFTER_FSM, s->status));
/*
* Set sequencer FSM error bit 3 (general_SPI_status[3])
* in status reg.
*/
s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
stop = true;
} else {
/*
* Look for the special case where there is a shift_n1 set for
* transmit and it is followed by a shift_n2 set for transmit
* AND the combined transmit length of the two operations is
* less than or equal to the size of the TDR register. In this
* case we want to use both this current shift_n1 opcode and the
* following shift_n2 opcode to assemble the frame for
* transmission to the responder without requiring a refill of
* the TDR between the two operations.
*/
if (PNV_SPI_MASKED_OPCODE(s->seq_op[get_seq_index(s) + 1])
== SEQ_OP_SHIFT_N2) {
send_n1_alone = false;
}
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_SHIFT_N1);
stop = operation_shiftn1(s, opcode, &payload, send_n1_alone);
if (stop) {
/*
* The operation code says to stop, this can occur if:
* (1) RDR is full and the N1 shift is set for receive
* (2) TDR was empty at the time of the N1 shift so we need
* to wait for data.
* (3) Neither 1 nor 2 are occurring and we aren't sending
* N1 alone and N2 counter reload is set (bit 0 of the N2
* counter reload field). In this case TDR_underrun will
* will be set and the Payload has been loaded so it is
* ok to advance the sequencer.
*/
if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
s->shift_n1_done = true;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_SHIFT_N2);
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
(get_seq_index(s) + 1));
} else {
/*
* This is case (1) or (2) so the sequencer needs to
* wait and NOT go to the next sequence yet.
*/
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_WAIT);
}
} else {
/* Ok to move on to the next index */
s->shift_n1_done = true;
next_sequencer_fsm(s);
}
}
break;
case SEQ_OP_SHIFT_N2:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("SHIFT_N2", get_seq_index(s));
if (!s->shift_n1_done) {
qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
"Shift_N1 is not done, shifter state = 0x%llx",
GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
/*
* In case the sequencer actually stops if an N2 shift is
* requested before any N1 shift is done. Set sequencer FSM
* error bit 3 (general_SPI_status[3]) in status reg.
*/
s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
trace_pnv_spi_sequencer_stop_requested("shift_n2 "
"w/no shift_n1 done");
stop = true;
} else {
/* Ok to do a Shift_N2 */
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_SHIFT_N2);
stop = operation_shiftn2(s, opcode, &payload);
/*
* If the operation code says to stop set the shifter state to
* wait and stop
*/
if (stop) {
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
FSM_WAIT);
} else {
/* Ok to move on to the next index */
next_sequencer_fsm(s);
}
}
break;
case SEQ_OP_BRANCH_IFNEQ_RDR:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", get_seq_index(s));
/*
* The memory mapping register RDR match value is compared against
* the 16 rightmost bytes of the RDR (potentially with masking).
* Since this comparison is performed against the contents of the
* RDR then a receive must have previously occurred otherwise
* there is no data to compare and the operation cannot be
* completed and will stop the sequencer until RDR full is set to
* 1.
*/
if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
bool rdr_matched = false;
rdr_matched = does_rdr_match(s);
if (rdr_matched) {
trace_pnv_spi_RDR_match("success");
/* A match occurred, increment the sequencer index. */
next_sequencer_fsm(s);
} else {
trace_pnv_spi_RDR_match("failed");
/*
* Branch the sequencer to the index coded into the op
* code.
*/
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
PNV_SPI_OPCODE_LO_NIBBLE(opcode));
}
/*
* Regardless of where the branch ended up we want the
* sequencer to continue shifting so we have to clear
* RDR_full.
*/
s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
} else {
trace_pnv_spi_sequencer_stop_requested("RDR not"
"full for 0x6x opcode");
stop = true;
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
}
break;
case SEQ_OP_TRANSFER_TDR:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
next_sequencer_fsm(s);
break;
case SEQ_OP_BRANCH_IFNEQ_INC_1:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", get_seq_index(s));
/*
* The spec says the loop should execute count compare + 1 times.
* However we learned from engineering that we really only loop
* count_compare times, count compare = 0 makes this op code a
* no-op
*/
if (s->loop_counter_1 !=
GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
/*
* Next index is the lower nibble of the branch operation ID,
* mask off all but the first three bits so we don't try to
* access beyond the sequencer_operation_reg boundary.
*/
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status,
PNV_SPI_OPCODE_LO_NIBBLE(opcode));
s->loop_counter_1++;
} else {
/* Continue to next index if loop counter is reached */
next_sequencer_fsm(s);
}
break;
case SEQ_OP_BRANCH_IFNEQ_INC_2:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", get_seq_index(s));
uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
s->regs[SPI_CTR_CFG_REG]);
/*
* The spec says the loop should execute count compare + 1 times.
* However we learned from engineering that we really only loop
* count_compare times, count compare = 0 makes this op code a
* no-op
*/
if (s->loop_counter_2 != condition2) {
/*
* Next index is the lower nibble of the branch operation ID,
* mask off all but the first three bits so we don't try to
* access beyond the sequencer_operation_reg boundary.
*/
s->status = SETFIELD(SPI_STS_SEQ_INDEX,
s->status, PNV_SPI_OPCODE_LO_NIBBLE(opcode));
s->loop_counter_2++;
} else {
/* Continue to next index if loop counter is reached */
next_sequencer_fsm(s);
}
break;
default:
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
/* Ignore unsupported operations. */
next_sequencer_fsm(s);
break;
} /* end of switch */
/*
* If we used all 8 opcodes without seeing a 00 - STOP in the sequence
* we need to go ahead and end things as if there was a STOP at the
* end.
*/
if (get_seq_index(s) == NUM_SEQ_OPS) {
/* All 8 opcodes completed, sequencer idling */
s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
s->loop_counter_1 = 0;
s->loop_counter_2 = 0;
s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
break;
}
/* Break the loop if a stop was requested */
if (stop) {
break;
}
} /* end of while */
return;
} /* end of operation_sequencer() */
/*
 * The SPIC engine and its internal sequencer can be interrupted and reset by
 * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
 * Miscellaneous Register of sbe_register_bo device.
 * Reset immediately aborts any SPI transaction in progress and returns the
 * sequencer and state machines to idle state.
 * The configuration register values are not changed. The status register is
 * not reset. The engine registers are not reset.
 * The SPIC engine reset does not have any effect on the attached devices.
 * Reset handling of any attached devices is beyond the scope of the engine.
 */
static void do_reset(DeviceState *dev)
{
    PnvSpi *s = PNV_SPI(dev);
    DeviceState *ssi_dev;

    trace_pnv_spi_reset();

    /* Connect cs irq */
    ssi_dev = ssi_get_cs(s->ssi_bus, 0);
    if (ssi_dev) {
        qemu_irq cs_line = qdev_get_gpio_in_named(ssi_dev, SSI_GPIO_CS, 0);
        qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_line);
    }

    /* Reset all N1 and N2 counters, and other constants */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    s->N1_bits = 0;
    s->N1_bytes = 0;
    s->N1_tx = 0;
    s->N1_rx = 0;
    s->loop_counter_1 = 0;
    s->loop_counter_2 = 0;
    /*
     * Also clear the shift_n1_done latch; without this, a reset taken
     * between an N1 and N2 shift would leave a stale "N1 done" flag that
     * lets a subsequent Shift_N2 pass the not-allowed-without-Shift_N1
     * check in the sequencer.
     */
    s->shift_n1_done = false;

    /* Disconnected from responder */
    qemu_set_irq(s->cs_line[0], 1);
}
/*
 * XSCOM read dispatch for the SPI engine registers.  Returns all-ones for
 * addresses that do not map to a known register.
 */
static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t regnum = addr >> 3;
    uint64_t ret = ~0ull;

    switch (regnum) {
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_CLK_CFG_REG:
    case SPI_MM_REG:
    case SPI_XMIT_DATA_REG:
        /* Plain register reads, no side effects */
        ret = s->regs[regnum];
        break;
    case SPI_RCV_DATA_REG:
        /*
         * Reading the RDR drains it: clear RDR_full and, if the shifter
         * was stalled waiting on a full RDR, restart the sequencer.
         */
        ret = s->regs[regnum];
        trace_pnv_spi_read_RDR(ret);
        s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
        if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
            trace_pnv_spi_start_sequencer();
            operation_sequencer(s);
        }
        break;
    case SPI_SEQ_OP_REG:
        /* Repack the 8 sequencer opcode bytes into one word, MSB first */
        ret = 0;
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            ret = (ret << 8) | s->seq_op[i];
        }
        break;
    case SPI_STS_REG:
        ret = s->status;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "read at 0x%" PRIx32 "\n", regnum);
    }

    trace_pnv_spi_read(addr, ret);
    return ret;
}
/*
 * XSCOM write dispatch for the SPI engine registers.  Writes to unknown
 * addresses are logged and dropped.
 */
static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t regnum = addr >> 3;

    trace_pnv_spi_write(addr, val);
    switch (regnum) {
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_MM_REG:
    case SPI_RCV_DATA_REG:
        /* Plain register stores, no side effects modeled */
        s->regs[regnum] = val;
        break;
    case SPI_CLK_CFG_REG: {
        /*
         * The controller is reset by writing 0x5 then 0xA to the
         * reset_control field; detect the second half of that handshake
         * by comparing the incoming field against the stored one.
         */
        uint64_t prev_rst = GETFIELD(SPI_CLK_CFG_RST_CTRL,
                                     s->regs[SPI_CLK_CFG_REG]);
        if (prev_rst == 0x5 && GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA) {
            /* SPI controller reset sequence completed, resetting */
            s->regs[regnum] = SPI_CLK_CFG_HARD_RST;
        } else {
            s->regs[regnum] = val;
        }
        break;
    }
    case SPI_XMIT_DATA_REG:
        /*
         * A TDR write marks the transmit register full and kicks the
         * sequencer.  Real hardware raises "Resource Not Available" when
         * TDR is written while already full, but writes in this model are
         * synchronous with the operation sequence, so that condition
         * cannot occur here.
         */
        s->regs[regnum] = val;
        trace_pnv_spi_write_TDR(val);
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
        trace_pnv_spi_start_sequencer();
        operation_sequencer(s);
        break;
    case SPI_SEQ_OP_REG:
        /* Unpack the 8 sequencer opcode bytes, MSB first */
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
        }
        break;
    case SPI_STS_REG:
        /* Only the RDR/TDR overrun bits take writes; other fields are
         * ignore_write */
        s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_RDR, val));
        s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_TDR, val));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "write at 0x%" PRIx32 "\n", regnum);
    }
}
/*
 * Memory region ops for the SPI engine's XSCOM window: accesses are
 * restricted to 8 bytes, matching the 64-bit register layout, and are
 * big-endian as seen from the PIB.
 */
static const MemoryRegionOps pnv_spi_xscom_ops = {
    .read = pnv_spi_xscom_read,
    .write = pnv_spi_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
static const Property pnv_spi_properties[] = {
    /* Engine instance number: selects XSCOM base address and SSI bus name */
    DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
    /* Transfer length in bytes, default 4 — not referenced in this part of
     * the file; presumably consumed by the shift logic, confirm there */
    DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
};
/*
 * Realize: create the per-engine SSI bus, the single chip-select GPIO
 * output and the XSCOM register region.
 */
static void pnv_spi_realize(DeviceState *dev, Error **errp)
{
    PnvSpi *s = PNV_SPI(dev);
    g_autofree char *bus_name =
        g_strdup_printf(TYPE_PNV_SPI_BUS ".%d", s->spic_num);

    /* One SSI bus per engine, named after the engine number */
    s->ssi_bus = ssi_create_bus(dev, bus_name);

    /* Single chip-select line, exposed as the "cs" GPIO output */
    s->cs_line = g_new0(qemu_irq, 1);
    qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);

    /* spi scoms */
    pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
                          s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
}
/*
 * Populate the device tree with a node for this SPI engine under the
 * XSCOM parent node at @offset.  Always returns 0; property failures
 * abort via _FDT().
 */
static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
                            int offset)
{
    PnvSpi *s = PNV_SPI(dev);
    const char compat[] = "ibm,power10-spi";
    /* Each engine occupies one SPIC-sized slot above the SPIC base */
    uint32_t base = PNV10_XSCOM_PIB_SPIC_BASE +
                    s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
    uint32_t reg[] = {
        cpu_to_be32(base),
        cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
    };
    g_autofree char *node_name = g_strdup_printf("pnv_spi@%x", base);
    int node;

    node = fdt_add_subnode(fdt, offset, node_name);
    _FDT(node);
    _FDT(fdt_setprop(fdt, node, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, node, "compatible", compat, sizeof(compat)));
    _FDT(fdt_setprop_cell(fdt, node, "spic_num#", s->spic_num));
    return 0;
}
/*
 * QOM class init: wire up the device-tree hook, description, realize,
 * legacy reset handler and properties for the PnvSpi type.
 */
static void pnv_spi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
    /* Device-tree population goes through the XSCOM interface */
    xscomc->dt_xscom = pnv_spi_dt_xscom;
    dc->desc = "PowerNV SPI";
    dc->realize = pnv_spi_realize;
    device_class_set_legacy_reset(dc, do_reset);
    device_class_set_props(dc, pnv_spi_properties);
}
/* QOM type registration data for the PowerNV SPI controller model. */
static const TypeInfo pnv_spi_info = {
    .name = TYPE_PNV_SPI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvSpi),
    .class_init = pnv_spi_class_init,
    /* Accessed via XSCOM, so the type implements PnvXScomInterface */
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
/* Register the PnvSpi QOM type with the type system at module init time. */
static void pnv_spi_register_types(void)
{
    type_register_static(&pnv_spi_info);
}
type_init(pnv_spi_register_types);
|