//===-- M68kInstrData.td - M68k Data Movement Instructions -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes the Motorola 680x0 data movement instructions which are
/// the basic means of transferring and storing addresses and data. Here is the
/// current status of the file:
///
/// Machine:
///
/// EXG [ ] FMOVE [ ] FSMOVE [ ] FDMOVE [ ] FMOVEM [ ]
/// LEA [~] PEA [ ] MOVE [~] MOVE16 [ ] MOVEA [ ]
/// MOVEM [ ] MOVEP [ ] MOVEQ [ ] LINK [~] UNLK [~]
///
/// Pseudo:
///
/// MOVI [x] MOVSX [x] MOVZX [x] MOVX [x]
///
/// Map:
///
/// [ ] - was not touched at all
/// [!] - requires external stuff to be implemented
/// [~] - in progress but usable
/// [x] - done
///
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// MOVE
//===----------------------------------------------------------------------===//
/// -----------------------------------------------------
///  F  E | D  C | B  A  9 | 8  7  6 | 5  4  3 | 2  1  0
/// -----------------------------------------------------
///       |      |    DESTINATION    |      SOURCE
///  0  0 | SIZE |   REG   |   MODE  |   MODE  |   REG
/// -----------------------------------------------------
///
/// NOTE: MOVE requires the EA X version for a direct register destination (0).
// MOVE has a different size encoding.
class MxMoveSize<bits<2> value> {
bits<2> Value = value;
}
def MxMoveSize8 : MxMoveSize<0b01>;
def MxMoveSize16 : MxMoveSize<0b11>;
def MxMoveSize32 : MxMoveSize<0b10>;
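// For illustration (standard M68k MOVE encoding): `move.l %d1, %d0` is the
// single word 0b0010_000_000_000_001 (0x2001), i.e. size = 10 (long),
// destination Dn (reg 000, mode 000), source Dn (mode 000, reg 001).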
class MxMoveEncoding<MxMoveSize size, MxEncMemOp dst_enc, MxEncMemOp src_enc> {
dag Value = (ascend
(descend 0b00, size.Value,
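      // Per the table above, the destination EA is encoded as REG followed by
      // MODE, the reverse of the source EA, so flip the dag operator of the
      // destination encoding (ascend <-> descend) to swap its sub-field order.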
!cond(
!eq(!getdagop(dst_enc.EA), descend): !setdagop(dst_enc.EA, ascend),
!eq(!getdagop(dst_enc.EA), ascend): !setdagop(dst_enc.EA, descend)),
src_enc.EA),
// Source extension
src_enc.Supplement,
// Destination extension
dst_enc.Supplement
);
}
// Special encoding for Xn
class MxMoveEncAddrMode_r<string reg_opnd> : MxEncMemOp {
let EA = (descend (descend 0b00, (slice "$"#reg_opnd, 3, 3)),
(operand "$"#reg_opnd, 3));
}
// TODO: Generalize and adopt this utility in other .td files as well.
multiclass MxMoveOperandEncodings<string opnd_name> {
// Dn
def MxMove#NAME#OpEnc_d : MxEncAddrMode_d<opnd_name>;
// An
def MxMove#NAME#OpEnc_a : MxEncAddrMode_a<opnd_name>;
// Xn
def MxMove#NAME#OpEnc_r : MxMoveEncAddrMode_r<opnd_name>;
// (An)+
def MxMove#NAME#OpEnc_o : MxEncAddrMode_o<opnd_name>;
// -(An)
def MxMove#NAME#OpEnc_e : MxEncAddrMode_e<opnd_name>;
// (i,PC,Xn)
def MxMove#NAME#OpEnc_k : MxEncAddrMode_k<opnd_name>;
// (i,PC)
def MxMove#NAME#OpEnc_q : MxEncAddrMode_q<opnd_name>;
// (i,An,Xn)
def MxMove#NAME#OpEnc_f : MxEncAddrMode_f<opnd_name>;
// (i,An)
def MxMove#NAME#OpEnc_p : MxEncAddrMode_p<opnd_name>;
// (ABS).L
def MxMove#NAME#OpEnc_b : MxEncAddrMode_abs<opnd_name, /*W/L=*/true>;
// (An)
def MxMove#NAME#OpEnc_j : MxEncAddrMode_j<opnd_name>;
}
defm Src : MxMoveOperandEncodings<"src">;
defm Dst : MxMoveOperandEncodings<"dst">;
defvar MxMoveSupportedAMs = ["o", "e", "k", "q", "f", "p", "b", "j"];
let Defs = [CCR] in
class MxMove<string size, dag outs, dag ins, list<dag> pattern, MxMoveEncoding enc>
: MxInst<outs, ins, "move."#size#"\t$src, $dst", pattern> {
let Inst = enc.Value;
}
// R <- R
class MxMove_RR<MxType TYPE, string DST_REG, string SRC_REG,
MxMoveEncoding ENC,
MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG),
MxOpBundle SRC = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_REG)>
: MxMove<TYPE.Prefix,
(outs DST.Op:$dst), (ins SRC.Op:$src),
[(null_frag)], ENC>;
foreach DST_REG = ["r", "a"] in {
foreach SRC_REG = ["r", "a"] in
foreach TYPE = [MxType16, MxType32] in
def MOV # TYPE.Size # DST_REG # SRC_REG # TYPE.Postfix
: MxMove_RR<TYPE, DST_REG, SRC_REG,
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_REG),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#SRC_REG)>>;
} // foreach DST_REG
foreach TYPE = [MxType8, MxType16, MxType32] in
def MOV # TYPE.Size # dd # TYPE.Postfix
: MxMove_RR<TYPE, "d", "d",
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
MxMoveDstOpEnc_d, MxMoveSrcOpEnc_d>>;
// M <- R
let mayStore = 1 in {
class MxMove_MR<MxType TYPE, MxOpBundle DST, string SRC_REG, MxMoveEncoding ENC,
MxOpBundle SRC = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_REG)>
: MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
[(store TYPE.VT:$src, DST.Pat:$dst)], ENC>;
class MxMove_MI<MxType TYPE, MxOpBundle DST, MxMoveEncoding ENC,
MxImmOpBundle SRC = !cast<MxImmOpBundle>("MxOp"#TYPE.Size#"AddrMode_i")>
: MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
[(store SRC.ImmPat:$src, DST.Pat:$dst)], ENC>;
} // let mayStore = 1
foreach REG = ["r", "a", "d"] in
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32], [MxType16, MxType32]) in
def MOV # TYPE.Size # AM # REG # TYPE.Postfix
: MxMove_MR<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM), REG,
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#REG)>>;
} // foreach AM
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = [MxType8, MxType16, MxType32] in
def MOV # TYPE.Size # AM # i # TYPE.Postfix
: MxMove_MI<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM),
MxEncAddrMode_i<"src", TYPE.Size>>>;
} // foreach AM
// R <- I
// No pattern, as all immediate -> register moves are matched to the MOVI pseudo
class MxMove_RI<MxType TYPE, string DST_REG, MxMoveEncoding ENC,
MxImmOpBundle SRC = !cast<MxImmOpBundle>("MxOp"#TYPE.Size#"AddrMode_i"),
MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG)>
: MxMove<TYPE.Prefix, (outs DST.Op:$dst), (ins SRC.Op:$src),
[(null_frag)], ENC>;
foreach REG = ["r", "a", "d"] in {
foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32], [MxType16, MxType32]) in
def MOV # TYPE.Size # REG # i # TYPE.Postfix
: MxMove_RI<TYPE, REG,
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#REG),
MxEncAddrMode_i<"src", TYPE.Size>>>;
} // foreach REG
// R <- M
let mayLoad = 1 in
class MxMove_RM<MxType TYPE, string DST_REG, MxOpBundle SRC, MxEncMemOp SRC_ENC,
MxMoveSize SIZE_ENC = !cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
MxOpBundle DST = !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_REG),
MxEncMemOp DST_ENC = !cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_REG)>
: MxMove<TYPE.Prefix, (outs DST.Op:$dst), (ins SRC.Op:$src),
[(set TYPE.VT:$dst, (TYPE.Load SRC.Pat:$src))],
MxMoveEncoding<SIZE_ENC, DST_ENC, SRC_ENC>>;
foreach REG = ["r", "a", "d"] in
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = !if(!eq(REG, "d"), [MxType8, MxType16, MxType32], [MxType16, MxType32]) in
def MOV # TYPE.Size # REG # AM # TYPE.Postfix
: MxMove_RM<TYPE, REG, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
} // foreach AM
// Tail call version
let Pattern = [(null_frag)] in {
foreach REG = ["r", "a"] in
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = [MxType16, MxType32] in
def MOV # TYPE.Size # REG # AM # _TC
: MxMove_RM<TYPE, REG, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)> {
let isCodeGenOnly = true;
}
} // foreach AM
} // let Pattern
let mayLoad = 1, mayStore = 1 in
class MxMove_MM<MxType TYPE, MxOpBundle DST, MxOpBundle SRC,
MxEncMemOp DST_ENC, MxEncMemOp SRC_ENC>
: MxMove<TYPE.Prefix, (outs), (ins DST.Op:$dst, SRC.Op:$src),
[(store (TYPE.Load SRC.Pat:$src), DST.Pat:$dst)],
MxMoveEncoding<!cast<MxMoveSize>("MxMoveSize"#TYPE.Size),
DST_ENC, SRC_ENC>>;
foreach DST_AM = MxMoveSupportedAMs in
foreach SRC_AM = MxMoveSupportedAMs in {
foreach TYPE = [MxType8, MxType16, MxType32] in
def MOV # TYPE.Size # DST_AM # SRC_AM # TYPE.Postfix
: MxMove_MM<TYPE, !cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#DST_AM),
!cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#SRC_AM),
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#DST_AM),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#SRC_AM)>;
} // foreach SRC_AM
// Store an ABS address (basically a pointer) as an immediate to memory.
def : Pat<(store MxType32.BPat :$src, MxType32.PPat :$dst),
(MOV32pi MxType32.POp :$dst, MxType32.IOp :$src)>;
def : Pat<(store MxType32.BPat :$src, MxType32.FPat :$dst),
(MOV32fi MxType32.FOp :$dst, MxType32.IOp :$src)>;
def : Pat<(store MxType32.BPat :$src, MxType32.BPat :$dst),
(MOV32bi MxType32.BOp :$dst, MxType32.IOp :$src)>;
def : Pat<(store MxType32.BPat :$src, MxType32.JPat :$dst),
(MOV32ji MxType32.JOp :$dst, MxType32.IOp :$src)>;
//===----------------------------------------------------------------------===//
// MOVEQ
//===----------------------------------------------------------------------===//
/// ------------+---------+---+-----------------------
///  F  E  D  C | B  A  9 | 8 | 7  6  5  4  3  2  1  0
/// ------------+---------+---+-----------------------
///  0  1  1  1 |   REG   | 0 |          DATA
/// ------------+---------+---+-----------------------
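/// The 8-bit DATA field is sign-extended to 32 bits by the CPU, so e.g.
/// `moveq #-1, %d0` encodes as 0b0111_000_0_11111111 (0x70FF).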
// No pattern, as all immediate -> register moves are matched to the MOVI pseudo
let Defs = [CCR] in
def MOVQ : MxInst<(outs MxDRD32:$dst), (ins Mxi8imm:$imm),
"moveq\t$imm, $dst",
[(null_frag)]> {
let Inst = (descend 0b0111, (operand "$dst", 3), 0b0, (operand "$imm", 8));
}
//===----------------------------------------------------------------------===//
// MOVEM
//
// The mask is already pre-processed by the save/restore spill hook
//===----------------------------------------------------------------------===//
// Direction
defvar MxMOVEM_MR = false;
defvar MxMOVEM_RM = true;
// Size
defvar MxMOVEM_W = false;
defvar MxMOVEM_L = true;
/// ---------------+---+---------+---+---------+---------
///  F  E  D  C  B | A | 9  8  7 | 6 | 5  4  3 | 2  1  0
/// ---------------+---+---------+---+---------+---------
///  0  1  0  0  1 | D | 0  0  1 | S |   MODE  |   REG
/// ---------------+---+---------+---+---------+---------
///                  REGISTER LIST MASK
/// -----------------------------------------------------
/// D - direction (RM, MR)
/// S - size (W, L)
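/// Note that for the predecrement mode, -(An), the hardware interprets the
/// register list mask in reverse bit order, which is why the 'e' addressing
/// mode below uses MxInverseMoveMask.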
class MxMOVEMEncoding<MxEncMemOp opnd_enc, bit size, bit direction,
string mask_op_name> {
dag Value = (ascend
(descend 0b01001, direction, 0b001, size, opnd_enc.EA),
// Mask
(operand "$"#mask_op_name, 16),
opnd_enc.Supplement
);
}
let mayStore = 1 in
class MxMOVEM_MR<MxType TYPE, bit SIZE_ENC,
MxOperand MEMOp, MxEncMemOp MEM_ENC,
MxOp MASKOp>
: MxInst<(outs), (ins MEMOp:$dst, MASKOp:$mask),
"movem."#TYPE.Prefix#"\t$mask, $dst", []> {
let Inst = MxMOVEMEncoding<MEM_ENC, SIZE_ENC, MxMOVEM_MR, "mask">.Value;
}
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = [MxType16, MxType32] in
def MOVM # TYPE.Size # AM # m # TYPE.Postfix
: MxMOVEM_MR<TYPE, !if(!eq(TYPE, MxType16), MxMOVEM_W, MxMOVEM_L),
!cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM),
!if(!eq(AM, "e"), MxInverseMoveMask, MxMoveMask)>;
} // foreach AM
let mayLoad = 1 in
class MxMOVEM_RM<MxType TYPE, bit SIZE_ENC,
MxOperand MEMOp, MxEncMemOp MEM_ENC,
MxOp MASKOp>
: MxInst<(outs), (ins MASKOp:$mask, MEMOp:$src),
"movem."#TYPE.Prefix#"\t$src, $mask", []> {
let Inst = MxMOVEMEncoding<MEM_ENC, SIZE_ENC, MxMOVEM_RM, "mask">.Value;
}
foreach AM = MxMoveSupportedAMs in {
foreach TYPE = [MxType16, MxType32] in
def MOVM # TYPE.Size # m # AM # TYPE.Postfix
: MxMOVEM_RM<TYPE, !if(!eq(TYPE, MxType16), MxMOVEM_W, MxMOVEM_L),
!cast<MxOpBundle>("MxOp"#TYPE.Size#"AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM),
!if(!eq(AM, "e"), MxInverseMoveMask, MxMoveMask)>;
} // foreach AM
// Pseudo versions. These are required for virtual register spill/restore
// since the mask requires real registers to encode. These instructions will
// be expanded into real MOVEM instructions after RA finishes.
let mayStore = 1 in
class MxMOVEM_MR_Pseudo<MxType TYPE, MxOperand MEMOp>
: MxPseudo<(outs), (ins MEMOp:$dst, TYPE.ROp:$reg)>;
let mayLoad = 1 in
class MxMOVEM_RM_Pseudo<MxType TYPE, MxOperand MEMOp>
: MxPseudo<(outs TYPE.ROp:$dst), (ins MEMOp:$src)>;
// Mem <- Reg
def MOVM16jm_P : MxMOVEM_MR_Pseudo<MxType16r, MxType16.JOp>;
def MOVM32jm_P : MxMOVEM_MR_Pseudo<MxType32r, MxType32.JOp>;
def MOVM16pm_P : MxMOVEM_MR_Pseudo<MxType16r, MxType16.POp>;
def MOVM32pm_P : MxMOVEM_MR_Pseudo<MxType32r, MxType32.POp>;
// Reg <- Mem
def MOVM16mj_P : MxMOVEM_RM_Pseudo<MxType16r, MxType16.JOp>;
def MOVM32mj_P : MxMOVEM_RM_Pseudo<MxType32r, MxType32.JOp>;
def MOVM16mp_P : MxMOVEM_RM_Pseudo<MxType16r, MxType16.POp>;
def MOVM32mp_P : MxMOVEM_RM_Pseudo<MxType32r, MxType32.POp>;
//===----------------------------------------------------------------------===//
// MOVE to/from SR/CCR
//
// Special care must be taken when working with moves to/from CCR, since CCR
// is basically the word-size SR register truncated for user mode and thus
// only supports word-size instructions. In addition, the original M68000
// does not support moves from CCR. So in order to use CCR effectively one
// MUST use the proper byte-size pseudo instructions, which are resolved
// sometime after the RA pass.
//===----------------------------------------------------------------------===//
/// Move to CCR
/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  1  0  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Defs = [CCR] in {
class MxMoveToCCR<MxOperand MEMOp, MxEncMemOp SRC_ENC>
: MxInst<(outs CCRC:$dst), (ins MEMOp:$src), "move.w\t$src, $dst", []> {
let Inst = (ascend
(descend 0b0100010011, SRC_ENC.EA),
SRC_ENC.Supplement
);
}
class MxMoveToCCRPseudo<MxOperand MEMOp>
: MxPseudo<(outs CCRC:$dst), (ins MEMOp:$src)>;
} // let Defs = [CCR]
let mayLoad = 1 in
foreach AM = MxMoveSupportedAMs in {
def MOV16c # AM : MxMoveToCCR<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
def MOV8c # AM : MxMoveToCCRPseudo<!cast<MxOpBundle>("MxOp8AddrMode_"#AM).Op>;
} // foreach AM
// Only a data register is allowed.
def MOV16cd : MxMoveToCCR<MxOp16AddrMode_d.Op, MxMoveSrcOpEnc_d>;
def MOV8cd : MxMoveToCCRPseudo<MxOp8AddrMode_d.Op>;
/// Move from CCR
/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  0  1  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Uses = [CCR] in {
class MxMoveFromCCR_R
: MxInst<(outs MxDRD16:$dst), (ins CCRC:$src), "move.w\t$src, $dst", []>,
Requires<[ AtLeastM68010 ]> {
let Inst = (descend 0b0100001011, MxEncAddrMode_d<"dst">.EA);
}
class MxMoveFromCCR_M<MxOperand MEMOp, MxEncMemOp DST_ENC>
: MxInst<(outs), (ins MEMOp:$dst, CCRC:$src), "move.w\t$src, $dst", []>,
Requires<[ AtLeastM68010 ]> {
let Inst = (ascend
(descend 0b0100001011, DST_ENC.EA),
DST_ENC.Supplement
);
}
class MxMoveFromCCRPseudo<MxOperand MEMOp>
: MxPseudo<(outs), (ins MEMOp:$dst, CCRC:$src)>;
class MxMoveFromCCR_RPseudo<MxOperand MEMOp>
: MxPseudo<(outs MEMOp:$dst), (ins CCRC:$src)>;
} // let Uses = [CCR]
let mayStore = 1 in
foreach AM = MxMoveSupportedAMs in {
def MOV16 # AM # c
: MxMoveFromCCR_M<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM)>;
def MOV8 # AM # c
: MxMoveFromCCRPseudo<!cast<MxOpBundle>("MxOp8AddrMode_"#AM).Op>;
} // foreach AM
// Only a data register is allowed.
def MOV16dc : MxMoveFromCCR_R;
def MOV8dc : MxMoveFromCCR_RPseudo<MxOp8AddrMode_d.Op>;
/// Move to SR
/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  1  1  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Defs = [SR] in {
class MxMoveToSR<MxOperand MEMOp, MxEncMemOp SRC_ENC>
: MxInst<(outs SRC:$dst), (ins MEMOp:$src), "move.w\t$src, $dst", []> {
let Inst = (ascend
(descend 0b0100011011, SRC_ENC.EA),
SRC_ENC.Supplement
);
}
} // let Defs = [SR]
let mayLoad = 1 in
foreach AM = MxMoveSupportedAMs in {
def MOV16s # AM : MxMoveToSR<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
} // foreach AM
def MOV16sd : MxMoveToSR<MxOp16AddrMode_d.Op, MxMoveSrcOpEnc_d>;
/// Move from SR
/// --------------------------------------------------
///  F  E  D  C  B  A  9  8  7  6 | 5  4  3 | 2  1  0
/// --------------------------------------------------
///                                | EFFECTIVE ADDRESS
///  0  1  0  0  0  0  0  0  1  1 |   MODE  |   REG
/// --------------------------------------------------
let Uses = [SR] in {
class MxMoveFromSR_R
: MxInst<(outs MxDRD16:$dst), (ins SRC:$src), "move.w\t$src, $dst", []>,
Requires<[ AtLeastM68010 ]> {
let Inst = (descend 0b0100000011, MxEncAddrMode_d<"dst">.EA);
}
class MxMoveFromSR_M<MxOperand MEMOp, MxEncMemOp DST_ENC>
: MxInst<(outs), (ins MEMOp:$dst, SRC:$src), "move.w\t$src, $dst", []>,
Requires<[ AtLeastM68010 ]> {
let Inst = (ascend
(descend 0b0100000011, DST_ENC.EA),
DST_ENC.Supplement
);
}
} // let Uses = [SR]
let mayStore = 1 in
foreach AM = MxMoveSupportedAMs in {
def MOV16 # AM # s
: MxMoveFromSR_M<!cast<MxOpBundle>("MxOp16AddrMode_"#AM).Op,
!cast<MxEncMemOp>("MxMoveDstOpEnc_"#AM)>;
} // foreach AM
def MOV16ds : MxMoveFromSR_R;
//===----------------------------------------------------------------------===//
// LEA
//===----------------------------------------------------------------------===//
/// ----------------------------------------------------
///  F  E  D  C | B  A  9 | 8  7  6 | 5  4  3 | 2  1  0
/// ----------------------------------------------------
///  0  1  0  0 | DST REG | 1  1  1 |   MODE  |   REG
/// ----------------------------------------------------
class MxLEA<MxOpBundle SRC, MxEncMemOp SRC_ENC>
: MxInst<(outs MxARD32:$dst), (ins SRC.Op:$src),
"lea\t$src, $dst", [(set i32:$dst, SRC.Pat:$src)]> {
let Inst = (ascend
(descend 0b0100, (operand "$dst", 3), 0b111, SRC_ENC.EA),
SRC_ENC.Supplement
);
}
foreach AM = ["p", "f", "b", "q", "k"] in
def LEA32 # AM : MxLEA<!cast<MxOpBundle>("MxOp32AddrMode_"#AM),
!cast<MxEncMemOp>("MxMoveSrcOpEnc_"#AM)>;
//===----------------------------------------------------------------------===//
// LINK/UNLK
//===----------------------------------------------------------------------===//
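// LINK An, #disp pushes An, copies SP into An (establishing a frame pointer),
// and then adds the (usually negative) displacement to SP to allocate the
// stack frame; UNLK An reverses the sequence.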
let Uses = [SP], Defs = [SP] in {
let mayStore = 1 in {
def LINK16 : MxInst<(outs), (ins MxARD16:$src, Mxi16imm:$disp), "link.w\t$src, $disp", []> {
let Inst = (ascend
(descend 0b0100111001010, (operand "$src", 3)),
(operand "$disp", 16)
);
}
def LINK32 : MxInst<(outs), (ins MxARD16:$src, Mxi32imm:$disp), "link.l\t$src, $disp", []> {
let Inst = (ascend
(descend 0b0100100000001, (operand "$src", 3)),
(slice "$disp", 31, 16),
(slice "$disp", 15, 0)
);
}
def UNLK : MxInst<(outs), (ins MxARD32:$src), "unlk\t$src", []> {
let Inst = (descend 0b0100111001011, (operand "$src", 3));
}
} // let mayStore = 1
} // let Uses = [SP], Defs = [SP]
//===----------------------------------------------------------------------===//
// Pseudos
//===----------------------------------------------------------------------===//
/// Push/pop to/from SP for simplicity.
let Uses = [SP], Defs = [SP], hasSideEffects = 0 in {
// SP <- SP - <size>; (SP) <- Dn
let mayStore = 1 in {
def PUSH8d : MxPseudo<(outs), (ins DR8:$reg)>;
def PUSH16d : MxPseudo<(outs), (ins DR16:$reg)>;
def PUSH32r : MxPseudo<(outs), (ins XR32:$reg)>;
} // let mayStore = 1
// Dn <- (SP); SP <- SP + <size>
let mayLoad = 1 in {
def POP8d : MxPseudo<(outs DR8:$reg), (ins)>;
def POP16d : MxPseudo<(outs DR16:$reg), (ins)>;
def POP32r : MxPseudo<(outs XR32:$reg), (ins)>;
} // let mayLoad = 1
} // let Uses/Defs = [SP], hasSideEffects = 0
let Defs = [CCR] in {
class MxPseudoMove_RR<MxType DST, MxType SRC, list<dag> PAT = []>
: MxPseudo<(outs DST.ROp:$dst), (ins SRC.ROp:$src), PAT>;
class MxPseudoMove_RM<MxType DST, MxOperand SRCOpd, list<dag> PAT = []>
: MxPseudo<(outs DST.ROp:$dst), (ins SRCOpd:$src), PAT>;
// These Pseudos handle loading immediates to registers.
// They are expanded post-RA into either move or moveq instructions,
// depending on size, destination register class, and immediate value.
// This is done with pseudoinstructions in order to not constrain RA to
// data registers if moveq matches.
class MxPseudoMove_DI<MxType TYPE>
: MxPseudo<(outs TYPE.ROp:$dst), (ins TYPE.IOp:$src),
[(set TYPE.ROp:$dst, imm:$src)]>;
// i8 imm -> reg can always be converted to moveq,
// but we still emit a pseudo for consistency.
def MOVI8di : MxPseudoMove_DI<MxType8d>;
def MOVI16ri : MxPseudoMove_DI<MxType16r>;
def MOVI32ri : MxPseudoMove_DI<MxType32r>;
} // let Defs = [CCR]
/// This group of pseudos is analogous to the real x86 extending moves, but
/// since M68k does not have those we need to emulate them. These instructions
/// will be expanded right after RA completes, because we need to know precisely
/// which registers are allocated for the operands: if they overlap we just
/// extend the value in place, and if the registers are completely different we
/// need to move first.
foreach EXT = ["S", "Z"] in {
let hasSideEffects = 0 in {
def MOV#EXT#Xd16d8 : MxPseudoMove_RR<MxType16d, MxType8d>;
def MOV#EXT#Xd32d8 : MxPseudoMove_RR<MxType32d, MxType8d>;
def MOV#EXT#Xd32d16 : MxPseudoMove_RR<MxType32r, MxType16r>;
let mayLoad = 1 in {
def MOV#EXT#Xd16j8 : MxPseudoMove_RM<MxType16d, MxType8.JOp>;
def MOV#EXT#Xd32j8 : MxPseudoMove_RM<MxType32d, MxType8.JOp>;
def MOV#EXT#Xd32j16 : MxPseudoMove_RM<MxType32d, MxType16.JOp>;
def MOV#EXT#Xd16p8 : MxPseudoMove_RM<MxType16d, MxType8.POp>;
def MOV#EXT#Xd32p8 : MxPseudoMove_RM<MxType32d, MxType8.POp>;
def MOV#EXT#Xd32p16 : MxPseudoMove_RM<MxType32d, MxType16.POp>;
def MOV#EXT#Xd16f8 : MxPseudoMove_RM<MxType16d, MxType8.FOp>;
def MOV#EXT#Xd32f8 : MxPseudoMove_RM<MxType32d, MxType8.FOp>;
def MOV#EXT#Xd32f16 : MxPseudoMove_RM<MxType32d, MxType16.FOp>;
def MOV#EXT#Xd16q8 : MxPseudoMove_RM<MxType16d, MxType8.QOp>;
def MOV#EXT#Xd32q8 : MxPseudoMove_RM<MxType32d, MxType8.QOp>;
def MOV#EXT#Xd32q16 : MxPseudoMove_RM<MxType32d, MxType16.QOp>;
}
}
}
/// This group of instructions is similar to the group above but DOES NOT do
/// any value extension: it just loads a smaller register into the lower part
/// of another register if the operands' real registers are different, or does
/// nothing if they are the same.
def MOVXd16d8 : MxPseudoMove_RR<MxType16d, MxType8d>;
def MOVXd32d8 : MxPseudoMove_RR<MxType32d, MxType8d>;
def MOVXd32d16 : MxPseudoMove_RR<MxType32r, MxType16r>;
//===----------------------------------------------------------------------===//
// Extend/Truncate Patterns
//===----------------------------------------------------------------------===//
// i16 <- sext i8
def: Pat<(i16 (sext i8:$src)),
(EXTRACT_SUBREG (MOVSXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARI:$src),
(EXTRACT_SUBREG (MOVSXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARID:$src),
(EXTRACT_SUBREG (MOVSXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_ARII:$src),
(EXTRACT_SUBREG (MOVSXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxSExtLoadi16i8 MxCP_PCD:$src), (MOVSXd16q8 MxPCD8:$src)>;
// i32 <- sext i8
def: Pat<(i32 (sext i8:$src)), (MOVSXd32d8 MxDRD8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARI :$src), (MOVSXd32j8 MxARI8 :$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARID:$src), (MOVSXd32p8 MxARID8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_ARII:$src), (MOVSXd32f8 MxARII8:$src)>;
def: Pat<(MxSExtLoadi32i8 MxCP_PCD:$src), (MOVSXd32q8 MxPCD8:$src)>;
// i32 <- sext i16
def: Pat<(i32 (sext i16:$src)), (MOVSXd32d16 MxDRD16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARI :$src), (MOVSXd32j16 MxARI16 :$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARID:$src), (MOVSXd32p16 MxARID16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_ARII:$src), (MOVSXd32f16 MxARII16:$src)>;
def: Pat<(MxSExtLoadi32i16 MxCP_PCD:$src), (MOVSXd32q16 MxPCD16:$src)>;
// i16 <- zext i8
def: Pat<(i16 (zext i8:$src)),
(EXTRACT_SUBREG (MOVZXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARI:$src),
(EXTRACT_SUBREG (MOVZXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARID:$src),
(EXTRACT_SUBREG (MOVZXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_ARII:$src),
(EXTRACT_SUBREG (MOVZXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxZExtLoadi16i8 MxCP_PCD :$src), (MOVZXd16q8 MxPCD8 :$src)>;
// i32 <- zext i8
def: Pat<(i32 (zext i8:$src)), (MOVZXd32d8 MxDRD8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARI :$src), (MOVZXd32j8 MxARI8 :$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARID:$src), (MOVZXd32p8 MxARID8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_ARII:$src), (MOVZXd32f8 MxARII8:$src)>;
def: Pat<(MxZExtLoadi32i8 MxCP_PCD :$src), (MOVZXd32q8 MxPCD8 :$src)>;
// i32 <- zext i16
def: Pat<(i32 (zext i16:$src)), (MOVZXd32d16 MxDRD16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARI :$src), (MOVZXd32j16 MxARI16 :$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARID:$src), (MOVZXd32p16 MxARID16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_ARII:$src), (MOVZXd32f16 MxARII16:$src)>;
def: Pat<(MxZExtLoadi32i16 MxCP_PCD :$src), (MOVZXd32q16 MxPCD16 :$src)>;
// i16 <- anyext i8
def: Pat<(i16 (anyext i8:$src)),
(EXTRACT_SUBREG (MOVZXd32d8 MxDRD8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARI:$src),
(EXTRACT_SUBREG (MOVZXd32j8 MxARI8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARID:$src),
(EXTRACT_SUBREG (MOVZXd32p8 MxARID8:$src), MxSubRegIndex16Lo)>;
def: Pat<(MxExtLoadi16i8 MxCP_ARII:$src),
(EXTRACT_SUBREG (MOVZXd32f8 MxARII8:$src), MxSubRegIndex16Lo)>;
// i32 <- anyext i8
def: Pat<(i32 (anyext i8:$src)), (MOVZXd32d8 MxDRD8:$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARI :$src), (MOVZXd32j8 MxARI8 :$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARID:$src), (MOVZXd32p8 MxARID8:$src)>;
def: Pat<(MxExtLoadi32i8 MxCP_ARII:$src), (MOVZXd32f8 MxARII8:$src)>;
// i32 <- anyext i16
def: Pat<(i32 (anyext i16:$src)), (MOVZXd32d16 MxDRD16:$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARI :$src), (MOVZXd32j16 MxARI16 :$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARID:$src), (MOVZXd32p16 MxARID16:$src)>;
def: Pat<(MxExtLoadi32i16 MxCP_ARII:$src), (MOVZXd32f16 MxARII16:$src)>;
// trunc patterns
def : Pat<(i16 (trunc i32:$src)),
(EXTRACT_SUBREG MxXRD32:$src, MxSubRegIndex16Lo)>;
def : Pat<(i8 (trunc i32:$src)),
(EXTRACT_SUBREG MxXRD32:$src, MxSubRegIndex8Lo)>;
def : Pat<(i8 (trunc i16:$src)),
(EXTRACT_SUBREG MxXRD16:$src, MxSubRegIndex8Lo)>;
//===----------------------------------------------------------------------===//
// FMOVE
//===----------------------------------------------------------------------===//
let Defs = [FPS] in
class MxFMove<string size, dag outs, dag ins, list<dag> pattern,
string rounding = "">
: MxInst<outs, ins,
"f"#rounding#"move."#size#"\t$src, $dst", pattern> {
// Only FMOVE uses FPC
let Uses = !if(!eq(rounding, ""), [FPC], []);
// FSMOVE and FDMOVE are only available after M68040
let Predicates = [!if(!eq(rounding, ""), AtLeastM68881, AtLeastM68040)];
}
// FPDR <- FPDR
class MxFMove_FF<string rounding, int size,
MxOpBundle Opnd = !cast<MxOpBundle>("MxOp"#size#"AddrMode_fpr")>
: MxFMove<"x", (outs Opnd.Op:$dst), (ins Opnd.Op:$src),
[(null_frag)], rounding> {
let Inst = (ascend
(descend 0b1111,
/*COPROCESSOR ID*/0b001,
0b000,
/*MODE + REGISTER*/0b000000
),
(descend 0b0, /* R/M */0b0, 0b0,
/*SOURCE SPECIFIER*/
(operand "$src", 3),
/*DESTINATION*/
(operand "$dst", 3),
/*OPMODE*/
!cond(!eq(rounding, "s"): 0b1000000,
!eq(rounding, "d"): 0b1000100,
true: 0b0000000)
)
);
}
foreach rounding = ["", "s", "d"] in {
def F # !toupper(rounding) # MOV80fp_fp : MxFMove_FF<rounding, 80>;
// We don't have `fmove.s` or `fmove.d` because values will be converted to
// f80 upon storing into the register, but FMOV32/64fp_fp are still needed
// to make codegen easier.
let isCodeGenOnly = true in
foreach size = [32, 64] in
def F # !toupper(rounding) # MOV # size # fp_fp : MxFMove_FF<rounding, size>;
}
// Direction
defvar MxFMove_FP_EA = false;
defvar MxFMove_EA_FP = true;
// Encoding scheme for FPSYS <-> R/M
class MxEncFSysMove<bit dir, MxEncMemOp EAEnc, string fsys_reg> {
dag Value = (ascend
(descend 0b1111,
/*COPROCESSOR ID*/0b001,
0b000,
/*MODE + REGISTER*/
EAEnc.EA
),
(descend 0b10, /*dir*/ dir,
/*REGISTER SELECT*/
(operand "$"#fsys_reg, 3, (encoder "encodeFPSYSSelect")),
0b0000000000
)
);
}
// FPSYS <-> R
class MxFMove_FSYS_R<string src_reg,
MxOpBundle SrcOpnd = !cast<MxOpBundle>("MxOp32AddrMode_"#src_reg),
MxOpBundle DstOpnd = !cond(!eq(src_reg, "d"): MxOp32AddrMode_fpcs,
!eq(src_reg, "a"): MxOp32AddrMode_fpi),
MxEncMemOp SrcEnc = !cast<MxEncMemOp>("MxMoveSrcOpEnc_"#src_reg)>
: MxFMove<"l", (outs DstOpnd.Op:$dst), (ins SrcOpnd.Op:$src),
[(null_frag)]> {
let Inst = MxEncFSysMove<MxFMove_FP_EA, SrcEnc, "dst">.Value;
}
class MxFMove_R_FSYS<string dst_reg,
MxOpBundle SrcOpnd = !cond(!eq(dst_reg, "d"): MxOp32AddrMode_fpcs,
!eq(dst_reg, "a"): MxOp32AddrMode_fpi),
MxOpBundle DstOpnd = !cast<MxOpBundle>("MxOp32AddrMode_"#dst_reg),
MxEncMemOp DstEnc = !cast<MxEncMemOp>("MxMoveDstOpEnc_"#dst_reg)>
: MxFMove<"l", (outs DstOpnd.Op:$dst), (ins SrcOpnd.Op:$src),
[(null_frag)]> {
let Inst = MxEncFSysMove<MxFMove_EA_FP, DstEnc, "src">.Value;
}
def FMOVE32fpcs_d : MxFMove_FSYS_R<"d">;
def FMOVE32d_fpcs : MxFMove_R_FSYS<"d">;
def FMOVE32fpi_a : MxFMove_FSYS_R<"a">;
def FMOVE32a_fpi : MxFMove_R_FSYS<"a">;